vendor: sync with upstream masters

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
Author: Gyu-Ho Lee
Date:   2017-06-23 09:06:03 -07:00
Parent: 372d4bbe96
Commit: 4fbdc93631

102 changed files with 8075 additions and 2099 deletions

View File

@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations:
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// set a new key, ignoring it's previous state
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {

View File

@ -44,7 +44,7 @@ type Member struct {
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves it's client-facing APIs.
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}

View File

@ -182,7 +182,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix":
case "unix", "unixs":
proto = "unix"
host = url.Host + url.Path
default:
@ -197,7 +197,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
case "unix":
case "http":
creds = nil
case "https":
case "https", "unixs":
if creds != nil {
break
}
@ -322,7 +322,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
opts = append(opts, c.cfg.DialOptions...)
conn, err := grpc.Dial(host, opts...)
conn, err := grpc.DialContext(c.ctx, host, opts...)
if err != nil {
return nil, err
}
@ -333,7 +333,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
return metadata.NewOutgoingContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
@ -367,7 +367,9 @@ func newClient(cfg *Config) (*Client, error) {
}
client.balancer = newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial("", grpc.WithBalancer(client.balancer))
// use Endpoints[0] so that for https:// without any tls config given, then
// grpc will assume the ServerName is in the endpoint.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
client.cancel()
client.balancer.Close()
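
A hedged usage sketch for the client changes above: WithRequireLeader now attaches its marker via metadata.NewOutgoingContext, so any clientv3 RPC can opt into failing fast when the serving member has no leader. The client variable cli, the timeout, and the logging below are illustrative assumptions, not part of this commit.

// Sketch only: assumes cli is an established *clientv3.Client and that
// "context", "log", "time", and "github.com/coreos/etcd/clientv3" are imported.
func getWithLeader(cli *clientv3.Client, key string) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// WithRequireLeader stamps the outgoing gRPC metadata; the member serving
	// the request rejects it when it has no leader.
	resp, err := cli.Get(clientv3.WithRequireLeader(ctx), key)
	if err != nil {
		log.Println("get failed:", err)
		return
	}
	for _, kv := range resp.Kvs {
		log.Printf("%s = %s", kv.Key, kv.Value)
	}
}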

View File

@ -99,6 +99,18 @@ func (cmp *Cmp) ValueBytes() []byte {
// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// WithRange sets the comparison to scan the range [key, end).
func (cmp Cmp) WithRange(end string) Cmp {
cmp.RangeEnd = []byte(end)
return cmp
}
// WithPrefix sets the comparison to scan all keys prefixed by the key.
func (cmp Cmp) WithPrefix() Cmp {
cmp.RangeEnd = getPrefix(cmp.Key)
return cmp
}
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
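
The new WithRange and WithPrefix helpers let a transaction guard cover a key range instead of a single key. A minimal, hedged sketch follows; cli, ctx, and the "job/" keyspace are illustrative assumptions, and server-side support for range comparisons may lag behind this client helper.

// Sketch only: assumes cli is a *clientv3.Client and ctx a context.Context.
// The comparison below is applied across the whole "job/" prefix rather than
// a single key; the Put runs only if it holds for the entire range.
cmp := clientv3.Compare(clientv3.Version("job/"), "=", 0).WithPrefix()
resp, err := cli.Txn(ctx).
	If(cmp).
	Then(clientv3.OpPut("job/1", "queued")).
	Commit()
if err == nil && resp.Succeeded {
	// no existing key under "job/" had a non-zero version; job/1 is now set
}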

View File

@ -66,11 +66,13 @@ type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
txn *TxnResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }
type kv struct {
remote pb.KVClient
@ -134,7 +136,6 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
@ -155,6 +156,12 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
case tTxn:
var resp *pb.TxnResponse
resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
if err == nil {
return OpResponse{txn: (*TxnResponse)(resp)}, nil
}
default:
panic("Unknown op")
}

View File

@ -323,7 +323,7 @@ func (l *lessor) closeRequireLeader() {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}
@ -386,7 +386,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.Close()
ka.close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
@ -467,7 +467,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
ka.close()
return
}
@ -497,7 +497,7 @@ func (l *lessor) deadlineLoop() {
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
ka.close()
delete(l.keepAlives, id)
}
}
@ -539,7 +539,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
}
}
func (ka *keepAlive) Close() {
func (ka *keepAlive) close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)

View File

@ -23,6 +23,7 @@ const (
tRange opType = iota + 1
tPut
tDeleteRange
tTxn
)
var (
@ -67,10 +68,18 @@ type Op struct {
// for put
val []byte
leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
}
// accesors / mutators
func (op Op) IsTxn() bool { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }
@ -113,6 +122,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
return r
}
func (op Op) toTxnRequest() *pb.TxnRequest {
thenOps := make([]*pb.RequestOp, len(op.thenOps))
for i, tOp := range op.thenOps {
thenOps[i] = tOp.toRequestOp()
}
elseOps := make([]*pb.RequestOp, len(op.elseOps))
for i, eOp := range op.elseOps {
elseOps[i] = eOp.toRequestOp()
}
cmps := make([]*pb.Compare, len(op.cmps))
for i := range op.cmps {
cmps[i] = (*pb.Compare)(&op.cmps[i])
}
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
@ -123,12 +148,27 @@ func (op Op) toRequestOp() *pb.RequestOp {
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
case tTxn:
return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
default:
panic("Unknown Op")
}
}
func (op Op) isWrite() bool {
if op.t == tTxn {
for _, tOp := range op.thenOps {
if tOp.isWrite() {
return true
}
}
for _, tOp := range op.elseOps {
if tOp.isWrite() {
return true
}
}
return false
}
return op.t != tRange
}
@ -194,6 +234,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
return ret
}
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
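
OpTxn, together with the tTxn handling added to the kv backend above, lets a transaction be built as an ordinary Op, submitted through KV.Do, and read back through the new OpResponse.Txn accessor. A hedged sketch; cli, ctx, the keys, and the values are illustrative assumptions.

// Sketch only: assumes cli is a *clientv3.Client, ctx a context.Context,
// and "log" is imported.
cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Value("status"), "=", "ready")}
thenOps := []clientv3.Op{clientv3.OpPut("status", "running")}
elseOps := []clientv3.Op{clientv3.OpGet("status")}
opResp, err := cli.Do(ctx, clientv3.OpTxn(cmps, thenOps, elseOps))
if err != nil {
	log.Fatal(err)
}
txnResp := opResp.Txn() // *clientv3.TxnResponse via the new accessor
log.Println("txn applied then-branch:", txnResp.Succeeded)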

View File

@ -24,6 +24,7 @@ import (
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
const (
@ -39,10 +40,9 @@ type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
@ -65,6 +65,9 @@ type WatchResponse struct {
Created bool
closeErr error
// cancelReason is a reason of canceling watch
cancelReason string
}
// IsCreate returns true if the event tells that the key is newly created.
@ -85,6 +88,9 @@ func (wr *WatchResponse) Err() error {
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
case wr.Canceled:
if len(wr.cancelReason) != 0 {
return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
}
return v3rpc.ErrFutureRev
}
return nil
@ -310,14 +316,14 @@ func (w *watcher) Close() (err error) {
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
if werr := wgs.close(); werr != nil {
err = werr
}
}
return err
}
func (w *watchGrpcStream) Close() (err error) {
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
select {
@ -520,10 +526,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream {
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
ws, ok := w.substreams[pbresp.WatchId]
if !ok {
return false
}
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
@ -534,6 +536,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
CompactRevision: pbresp.CompactRevision,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
cancelReason: pbresp.CancelReason,
}
ws, ok := w.substreams[pbresp.WatchId]
if !ok {
return false
}
select {
case ws.recvc <- wr:
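
With cancelReason now carried on WatchResponse, Err() can report the server's stated reason for ending a watch in addition to the compaction case. A hedged consumption sketch; cli, ctx, the key, and the start revision are illustrative assumptions.

// Sketch only: assumes cli is a *clientv3.Client, ctx a context.Context,
// and "log" is imported.
wch := cli.Watch(ctx, "key", clientv3.WithRev(5))
for wresp := range wch {
	if wresp.CompactRevision != 0 {
		log.Printf("requested revision was compacted away (compact revision %d)", wresp.CompactRevision)
		break
	}
	if err := wresp.Err(); err != nil {
		// surfaces the server-provided cancel reason when one is set
		log.Printf("watch canceled: %v", err)
		break
	}
	for _, ev := range wresp.Events {
		log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}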

(File diff suppressed because it is too large.)

View File

@ -30,9 +30,10 @@ const (
// Result describes the timings for an operation.
type Result struct {
Start time.Time
End time.Time
Err error
Start time.Time
End time.Time
Err error
Weight float64
}
func (res *Result) Duration() time.Duration { return res.End.Sub(res.Start) }
@ -41,18 +42,8 @@ type report struct {
results chan Result
precision string
avgTotal float64
fastest float64
slowest float64
average float64
stddev float64
rps float64
total time.Duration
errorDist map[string]int
lats []float64
sps *secondPoints
stats Stats
sps *secondPoints
}
// Stats exposes results raw data.
@ -69,6 +60,13 @@ type Stats struct {
TimeSeries TimeSeries
}
func (s *Stats) copy() Stats {
ss := *s
ss.ErrorDist = copyMap(ss.ErrorDist)
ss.Lats = copyFloats(ss.Lats)
return ss
}
// Report processes a result stream until it is closed, then produces a
// string with information about the consumed result data.
type Report interface {
@ -81,12 +79,15 @@ type Report interface {
Stats() <-chan Stats
}
func NewReport(precision string) Report {
return &report{
func NewReport(precision string) Report { return newReport(precision) }
func newReport(precision string) *report {
r := &report{
results: make(chan Result, 16),
precision: precision,
errorDist: make(map[string]int),
}
r.stats.ErrorDist = make(map[string]int)
return r
}
func NewReportSample(precision string) Report {
@ -112,22 +113,11 @@ func (r *report) Stats() <-chan Stats {
go func() {
defer close(donec)
r.processResults()
var ts TimeSeries
s := r.stats.copy()
if r.sps != nil {
ts = r.sps.getTimeSeries()
}
donec <- Stats{
AvgTotal: r.avgTotal,
Fastest: r.fastest,
Slowest: r.slowest,
Average: r.average,
Stddev: r.stddev,
RPS: r.rps,
Total: r.total,
ErrorDist: copyMap(r.errorDist),
Lats: copyFloats(r.lats),
TimeSeries: ts,
s.TimeSeries = r.sps.getTimeSeries()
}
donec <- s
}()
return donec
}
@ -147,21 +137,21 @@ func copyFloats(s []float64) (c []float64) {
}
func (r *report) String() (s string) {
if len(r.lats) > 0 {
if len(r.stats.Lats) > 0 {
s += fmt.Sprintf("\nSummary:\n")
s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.total.Seconds()))
s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.slowest))
s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.fastest))
s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.average))
s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stddev))
s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.stats.Total.Seconds()))
s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.stats.Slowest))
s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.stats.Fastest))
s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.stats.Average))
s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stats.Stddev))
s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.stats.RPS)
s += r.histogram()
s += r.sprintLatencies()
if r.sps != nil {
s += fmt.Sprintf("%v\n", r.sps.getTimeSeries())
}
}
if len(r.errorDist) > 0 {
if len(r.stats.ErrorDist) > 0 {
s += r.errors()
}
return s
@ -176,17 +166,17 @@ func NewReportRate(precision string) Report {
}
func (r *reportRate) String() string {
return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.stats.RPS)
}
func (r *report) processResult(res *Result) {
if res.Err != nil {
r.errorDist[res.Err.Error()]++
r.stats.ErrorDist[res.Err.Error()]++
return
}
dur := res.Duration()
r.lats = append(r.lats, dur.Seconds())
r.avgTotal += dur.Seconds()
r.stats.Lats = append(r.stats.Lats, dur.Seconds())
r.stats.AvgTotal += dur.Seconds()
if r.sps != nil {
r.sps.Add(res.Start, dur)
}
@ -197,19 +187,19 @@ func (r *report) processResults() {
for res := range r.results {
r.processResult(&res)
}
r.total = time.Since(st)
r.stats.Total = time.Since(st)
r.rps = float64(len(r.lats)) / r.total.Seconds()
r.average = r.avgTotal / float64(len(r.lats))
for i := range r.lats {
dev := r.lats[i] - r.average
r.stddev += dev * dev
r.stats.RPS = float64(len(r.stats.Lats)) / r.stats.Total.Seconds()
r.stats.Average = r.stats.AvgTotal / float64(len(r.stats.Lats))
for i := range r.stats.Lats {
dev := r.stats.Lats[i] - r.stats.Average
r.stats.Stddev += dev * dev
}
r.stddev = math.Sqrt(r.stddev / float64(len(r.lats)))
sort.Float64s(r.lats)
if len(r.lats) > 0 {
r.fastest = r.lats[0]
r.slowest = r.lats[len(r.lats)-1]
r.stats.Stddev = math.Sqrt(r.stats.Stddev / float64(len(r.stats.Lats)))
sort.Float64s(r.stats.Lats)
if len(r.stats.Lats) > 0 {
r.stats.Fastest = r.stats.Lats[0]
r.stats.Slowest = r.stats.Lats[len(r.stats.Lats)-1]
}
}
@ -235,7 +225,7 @@ func percentiles(nums []float64) (data []float64) {
}
func (r *report) sprintLatencies() string {
data := percentiles(r.lats)
data := percentiles(r.stats.Lats)
s := fmt.Sprintf("\nLatency distribution:\n")
for i := 0; i < len(pctls); i++ {
if data[i] > 0 {
@ -249,15 +239,15 @@ func (r *report) histogram() string {
bc := 10
buckets := make([]float64, bc+1)
counts := make([]int, bc+1)
bs := (r.slowest - r.fastest) / float64(bc)
bs := (r.stats.Slowest - r.stats.Fastest) / float64(bc)
for i := 0; i < bc; i++ {
buckets[i] = r.fastest + bs*float64(i)
buckets[i] = r.stats.Fastest + bs*float64(i)
}
buckets[bc] = r.slowest
buckets[bc] = r.stats.Slowest
var bi int
var max int
for i := 0; i < len(r.lats); {
if r.lats[i] <= buckets[bi] {
for i := 0; i < len(r.stats.Lats); {
if r.stats.Lats[i] <= buckets[bi] {
i++
counts[bi]++
if max < counts[bi] {
@ -281,7 +271,7 @@ func (r *report) histogram() string {
func (r *report) errors() string {
s := fmt.Sprintf("\nError distribution:\n")
for err, num := range r.errorDist {
for err, num := range r.stats.ErrorDist {
s += fmt.Sprintf(" [%d]\t%s\n", num, err)
}
return s
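
The refactor above moves every aggregate onto the embedded Stats struct, but the public flow is unchanged: send Result values into Results(), close the channel, then read either the formatted summary from Run() or the raw Stats. A hedged sketch; the precision format and the synthetic latencies are illustrative assumptions.

// Sketch only: assumes "fmt", "time", and
// "github.com/coreos/etcd/pkg/report" are imported.
r := report.NewReport("%4.4f")
statsc := r.Stats() // starts the aggregation goroutine
start := time.Now()
r.Results() <- report.Result{Start: start, End: start.Add(10 * time.Millisecond)}
r.Results() <- report.Result{Start: start, End: start.Add(30 * time.Millisecond)}
close(r.Results()) // no more results; aggregation completes
s := <-statsc
fmt.Println(s.Average, s.Slowest, s.RPS)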

vendor/github.com/coreos/etcd/pkg/report/weighted.go (new file, 101 lines, generated, vendored)
View File

@ -0,0 +1,101 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// the file is borrowed from github.com/rakyll/boom/boomer/print.go
package report
import (
"time"
)
type weightedReport struct {
baseReport Report
report *report
results chan Result
weightTotal float64
}
// NewWeightedReport returns a report that includes
// both weighted and unweighted statistics.
func NewWeightedReport(r Report, precision string) Report {
return &weightedReport{
baseReport: r,
report: newReport(precision),
results: make(chan Result, 16),
}
}
func (wr *weightedReport) Results() chan<- Result { return wr.results }
func (wr *weightedReport) Run() <-chan string {
donec := make(chan string, 2)
go func() {
defer close(donec)
basec, rc := make(chan string, 1), make(chan Stats, 1)
go func() { basec <- (<-wr.baseReport.Run()) }()
go func() { rc <- (<-wr.report.Stats()) }()
go wr.processResults()
wr.report.stats = wr.reweighStat(<-rc)
donec <- wr.report.String()
donec <- (<-basec)
}()
return donec
}
func (wr *weightedReport) Stats() <-chan Stats {
donec := make(chan Stats, 2)
go func() {
defer close(donec)
basec, rc := make(chan Stats, 1), make(chan Stats, 1)
go func() { basec <- (<-wr.baseReport.Stats()) }()
go func() { rc <- (<-wr.report.Stats()) }()
go wr.processResults()
donec <- wr.reweighStat(<-rc)
donec <- (<-basec)
}()
return donec
}
func (wr *weightedReport) processResults() {
defer close(wr.report.results)
defer close(wr.baseReport.Results())
for res := range wr.results {
wr.processResult(res)
wr.baseReport.Results() <- res
}
}
func (wr *weightedReport) processResult(res Result) {
if res.Err != nil {
wr.report.results <- res
return
}
if res.Weight == 0 {
res.Weight = 1.0
}
wr.weightTotal += res.Weight
res.End = res.Start.Add(time.Duration(float64(res.End.Sub(res.Start)) / res.Weight))
res.Weight = 1.0
wr.report.results <- res
}
func (wr *weightedReport) reweighStat(s Stats) Stats {
weightCoef := wr.weightTotal / float64(len(s.Lats))
// weight > 1 => processing more than one request
s.RPS *= weightCoef
s.AvgTotal *= weightCoef * weightCoef
return s
}
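
NewWeightedReport layers weighting on top of an existing Report: a Result with Weight w is treated as if it stood for w requests, and the weighted summary is produced alongside the untouched output of the wrapped report. A hedged sketch; the weight, latency, and format string are illustrative assumptions.

// Sketch only: assumes "fmt", "time", and
// "github.com/coreos/etcd/pkg/report" are imported.
wr := report.NewWeightedReport(report.NewReport("%4.4f"), "%4.4f")
summaries := wr.Run()
start := time.Now()
// Weight 4 means this single Result represents four requests' worth of work.
wr.Results() <- report.Result{Start: start, End: start.Add(20 * time.Millisecond), Weight: 4}
close(wr.Results())
fmt.Println(<-summaries) // weighted summary
fmt.Println(<-summaries) // unweighted summary from the wrapped base report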

View File

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package srv looks up DNS SRV records.
package srv
import (

View File

@ -26,7 +26,7 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
Version = "3.2.0-rc.0+git"
Version = "3.2.0-rc.1+git"
APIVersion = "unknown"
// Git SHA Value will be set during build

View File

@ -1,3 +1,18 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org
package semver
import (
@ -29,35 +44,21 @@ func splitOff(input *string, delim string) (val string) {
return val
}
func New(version string) *Version {
return Must(NewVersion(version))
}
func NewVersion(version string) (*Version, error) {
v := Version{}
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version))
if err := v.Set(version); err != nil {
return nil, err
}
v.Metadata = splitOff(&dotParts[2], "+")
v.PreRelease = PreRelease(splitOff(&dotParts[2], "-"))
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return nil, err
}
}
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return &v, nil
}
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
if err != nil {
panic(err)
@ -65,45 +66,99 @@ func Must(v *Version, err error) *Version {
return v
}
func (v *Version) String() string {
// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
metadata := splitOff(&version, "+")
preRelease := PreRelease(splitOff(&version, "-"))
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return fmt.Errorf("%s is not in dotted-tri format", version)
}
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return err
}
}
v.Metadata = metadata
v.PreRelease = preRelease
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return nil
}
func (v Version) String() string {
var buffer bytes.Buffer
base := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
buffer.WriteString(base)
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.PreRelease != "" {
buffer.WriteString(fmt.Sprintf("-%s", v.PreRelease))
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
}
if v.Metadata != "" {
buffer.WriteString(fmt.Sprintf("+%s", v.Metadata))
fmt.Fprintf(&buffer, "+%s", v.Metadata)
}
return buffer.String()
}
func (v *Version) LessThan(versionB Version) bool {
versionA := *v
cmp := recursiveCompare(versionA.Slice(), versionB.Slice())
if cmp == 0 {
cmp = preReleaseCompare(versionA, versionB)
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var data string
if err := unmarshal(&data); err != nil {
return err
}
if cmp == -1 {
return true
}
return false
return v.Set(data)
}
/* Slice converts the comparable parts of the semver into a slice of strings */
func (v *Version) Slice() []int64 {
func (v Version) MarshalJSON() ([]byte, error) {
return []byte(`"` + v.String() + `"`), nil
}
func (v *Version) UnmarshalJSON(data []byte) error {
l := len(data)
if l == 0 || string(data) == `""` {
return nil
}
if l < 2 || data[0] != '"' || data[l-1] != '"' {
return errors.New("invalid semver string")
}
return v.Set(string(data[1 : l-1]))
}
// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
return cmp
}
return preReleaseCompare(v, versionB)
}
// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
return v.Compare(versionB) == 0
}
// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
return v.Compare(versionB) < 0
}
// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
return []int64{v.Major, v.Minor, v.Patch}
}
func (p *PreRelease) Slice() []string {
preRelease := string(*p)
func (p PreRelease) Slice() []string {
preRelease := string(p)
return strings.Split(preRelease, ".")
}
@ -119,7 +174,7 @@ func preReleaseCompare(versionA Version, versionB Version) int {
return -1
}
// If there is a prelease, check and compare each part.
// If there is a prerelease, check and compare each part.
return recursivePreReleaseCompare(a.Slice(), b.Slice())
}
@ -141,9 +196,12 @@ func recursiveCompare(versionA []int64, versionB []int64) int {
}
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
// Handle slice length disparity.
// A larger set of pre-release fields has a higher precedence than a smaller set,
// if all of the preceding identifiers are equal.
if len(versionA) == 0 {
// Nothing to compare too, so we return 0
if len(versionB) > 0 {
return -1
}
return 0
} else if len(versionB) == 0 {
// We're longer than versionB so return 1.
@ -153,7 +211,8 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int {
a := versionA[0]
b := versionB[0]
aInt := false; bInt := false
aInt := false
bInt := false
aI, err := strconv.Atoi(versionA[0])
if err == nil {
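
The reworked go-semver API centers on Set, which also satisfies flag.Value, with Compare, Equal, and LessThan layered on top. A hedged sketch of the updated surface; the version strings are illustrative assumptions.

// Sketch only: assumes "fmt", "log", and
// "github.com/coreos/go-semver/semver" are imported.
rc, err := semver.NewVersion("3.2.0-rc.1+git")
if err != nil {
	log.Fatal(err)
}
release := semver.New("3.2.0") // Must-style constructor; panics on malformed input
fmt.Println(rc.LessThan(*release)) // true: the rc pre-release sorts before the release
fmt.Println(rc.Compare(*release))  // -1
var v semver.Version
if err := v.Set("1.2.3+build.7"); err != nil { // Set doubles as a flag.Value setter
	log.Fatal(err)
}
fmt.Println(v) // 1.2.3+build.7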

View File

@ -1,3 +1,17 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (

View File

@ -148,6 +148,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th
- goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
- goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
- goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
- goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
Less Typing and Peace of Mind is explained in their specific plugin folders godoc:

View File

@ -34,6 +34,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
Field: 62001,
Name: "gogoproto.goproto_enum_prefix",
Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
Filename: "gogo.proto",
}
var E_GoprotoEnumStringer = &proto.ExtensionDesc{
@ -42,6 +43,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{
Field: 62021,
Name: "gogoproto.goproto_enum_stringer",
Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
Filename: "gogo.proto",
}
var E_EnumStringer = &proto.ExtensionDesc{
@ -50,6 +52,7 @@ var E_EnumStringer = &proto.ExtensionDesc{
Field: 62022,
Name: "gogoproto.enum_stringer",
Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
Filename: "gogo.proto",
}
var E_EnumCustomname = &proto.ExtensionDesc{
@ -58,6 +61,16 @@ var E_EnumCustomname = &proto.ExtensionDesc{
Field: 62023,
Name: "gogoproto.enum_customname",
Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
Filename: "gogo.proto",
}
var E_Enumdecl = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 62024,
Name: "gogoproto.enumdecl",
Tag: "varint,62024,opt,name=enumdecl",
Filename: "gogo.proto",
}
var E_EnumvalueCustomname = &proto.ExtensionDesc{
@ -66,6 +79,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{
Field: 66001,
Name: "gogoproto.enumvalue_customname",
Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
Filename: "gogo.proto",
}
var E_GoprotoGettersAll = &proto.ExtensionDesc{
@ -74,6 +88,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{
Field: 63001,
Name: "gogoproto.goproto_getters_all",
Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
Filename: "gogo.proto",
}
var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
@ -82,6 +97,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
Field: 63002,
Name: "gogoproto.goproto_enum_prefix_all",
Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
Filename: "gogo.proto",
}
var E_GoprotoStringerAll = &proto.ExtensionDesc{
@ -90,6 +106,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{
Field: 63003,
Name: "gogoproto.goproto_stringer_all",
Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
Filename: "gogo.proto",
}
var E_VerboseEqualAll = &proto.ExtensionDesc{
@ -98,6 +115,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{
Field: 63004,
Name: "gogoproto.verbose_equal_all",
Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
Filename: "gogo.proto",
}
var E_FaceAll = &proto.ExtensionDesc{
@ -106,6 +124,7 @@ var E_FaceAll = &proto.ExtensionDesc{
Field: 63005,
Name: "gogoproto.face_all",
Tag: "varint,63005,opt,name=face_all,json=faceAll",
Filename: "gogo.proto",
}
var E_GostringAll = &proto.ExtensionDesc{
@ -114,6 +133,7 @@ var E_GostringAll = &proto.ExtensionDesc{
Field: 63006,
Name: "gogoproto.gostring_all",
Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
Filename: "gogo.proto",
}
var E_PopulateAll = &proto.ExtensionDesc{
@ -122,6 +142,7 @@ var E_PopulateAll = &proto.ExtensionDesc{
Field: 63007,
Name: "gogoproto.populate_all",
Tag: "varint,63007,opt,name=populate_all,json=populateAll",
Filename: "gogo.proto",
}
var E_StringerAll = &proto.ExtensionDesc{
@ -130,6 +151,7 @@ var E_StringerAll = &proto.ExtensionDesc{
Field: 63008,
Name: "gogoproto.stringer_all",
Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
Filename: "gogo.proto",
}
var E_OnlyoneAll = &proto.ExtensionDesc{
@ -138,6 +160,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{
Field: 63009,
Name: "gogoproto.onlyone_all",
Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
Filename: "gogo.proto",
}
var E_EqualAll = &proto.ExtensionDesc{
@ -146,6 +169,7 @@ var E_EqualAll = &proto.ExtensionDesc{
Field: 63013,
Name: "gogoproto.equal_all",
Tag: "varint,63013,opt,name=equal_all,json=equalAll",
Filename: "gogo.proto",
}
var E_DescriptionAll = &proto.ExtensionDesc{
@ -154,6 +178,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{
Field: 63014,
Name: "gogoproto.description_all",
Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
Filename: "gogo.proto",
}
var E_TestgenAll = &proto.ExtensionDesc{
@ -162,6 +187,7 @@ var E_TestgenAll = &proto.ExtensionDesc{
Field: 63015,
Name: "gogoproto.testgen_all",
Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
Filename: "gogo.proto",
}
var E_BenchgenAll = &proto.ExtensionDesc{
@ -170,6 +196,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{
Field: 63016,
Name: "gogoproto.benchgen_all",
Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
Filename: "gogo.proto",
}
var E_MarshalerAll = &proto.ExtensionDesc{
@ -178,6 +205,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{
Field: 63017,
Name: "gogoproto.marshaler_all",
Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
Filename: "gogo.proto",
}
var E_UnmarshalerAll = &proto.ExtensionDesc{
@ -186,6 +214,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{
Field: 63018,
Name: "gogoproto.unmarshaler_all",
Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
Filename: "gogo.proto",
}
var E_StableMarshalerAll = &proto.ExtensionDesc{
@ -194,6 +223,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{
Field: 63019,
Name: "gogoproto.stable_marshaler_all",
Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
Filename: "gogo.proto",
}
var E_SizerAll = &proto.ExtensionDesc{
@ -202,6 +232,7 @@ var E_SizerAll = &proto.ExtensionDesc{
Field: 63020,
Name: "gogoproto.sizer_all",
Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
Filename: "gogo.proto",
}
var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
@ -210,6 +241,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
Field: 63021,
Name: "gogoproto.goproto_enum_stringer_all",
Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
Filename: "gogo.proto",
}
var E_EnumStringerAll = &proto.ExtensionDesc{
@ -218,6 +250,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{
Field: 63022,
Name: "gogoproto.enum_stringer_all",
Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
Filename: "gogo.proto",
}
var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
@ -226,6 +259,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
Field: 63023,
Name: "gogoproto.unsafe_marshaler_all",
Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
Filename: "gogo.proto",
}
var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
@ -234,6 +268,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
Field: 63024,
Name: "gogoproto.unsafe_unmarshaler_all",
Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
Filename: "gogo.proto",
}
var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
@ -242,6 +277,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
Field: 63025,
Name: "gogoproto.goproto_extensions_map_all",
Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
Filename: "gogo.proto",
}
var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
@ -250,6 +286,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
Field: 63026,
Name: "gogoproto.goproto_unrecognized_all",
Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
Filename: "gogo.proto",
}
var E_GogoprotoImport = &proto.ExtensionDesc{
@ -258,6 +295,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{
Field: 63027,
Name: "gogoproto.gogoproto_import",
Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
Filename: "gogo.proto",
}
var E_ProtosizerAll = &proto.ExtensionDesc{
@ -266,6 +304,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{
Field: 63028,
Name: "gogoproto.protosizer_all",
Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
Filename: "gogo.proto",
}
var E_CompareAll = &proto.ExtensionDesc{
@ -274,6 +313,34 @@ var E_CompareAll = &proto.ExtensionDesc{
Field: 63029,
Name: "gogoproto.compare_all",
Tag: "varint,63029,opt,name=compare_all,json=compareAll",
Filename: "gogo.proto",
}
var E_TypedeclAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63030,
Name: "gogoproto.typedecl_all",
Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll",
Filename: "gogo.proto",
}
var E_EnumdeclAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63031,
Name: "gogoproto.enumdecl_all",
Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll",
Filename: "gogo.proto",
}
var E_GoprotoRegistration = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63032,
Name: "gogoproto.goproto_registration",
Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration",
Filename: "gogo.proto",
}
var E_GoprotoGetters = &proto.ExtensionDesc{
@ -282,6 +349,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{
Field: 64001,
Name: "gogoproto.goproto_getters",
Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
Filename: "gogo.proto",
}
var E_GoprotoStringer = &proto.ExtensionDesc{
@ -290,6 +358,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{
Field: 64003,
Name: "gogoproto.goproto_stringer",
Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
Filename: "gogo.proto",
}
var E_VerboseEqual = &proto.ExtensionDesc{
@ -298,6 +367,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{
Field: 64004,
Name: "gogoproto.verbose_equal",
Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
Filename: "gogo.proto",
}
var E_Face = &proto.ExtensionDesc{
@ -306,6 +376,7 @@ var E_Face = &proto.ExtensionDesc{
Field: 64005,
Name: "gogoproto.face",
Tag: "varint,64005,opt,name=face",
Filename: "gogo.proto",
}
var E_Gostring = &proto.ExtensionDesc{
@ -314,6 +385,7 @@ var E_Gostring = &proto.ExtensionDesc{
Field: 64006,
Name: "gogoproto.gostring",
Tag: "varint,64006,opt,name=gostring",
Filename: "gogo.proto",
}
var E_Populate = &proto.ExtensionDesc{
@ -322,6 +394,7 @@ var E_Populate = &proto.ExtensionDesc{
Field: 64007,
Name: "gogoproto.populate",
Tag: "varint,64007,opt,name=populate",
Filename: "gogo.proto",
}
var E_Stringer = &proto.ExtensionDesc{
@ -330,6 +403,7 @@ var E_Stringer = &proto.ExtensionDesc{
Field: 67008,
Name: "gogoproto.stringer",
Tag: "varint,67008,opt,name=stringer",
Filename: "gogo.proto",
}
var E_Onlyone = &proto.ExtensionDesc{
@ -338,6 +412,7 @@ var E_Onlyone = &proto.ExtensionDesc{
Field: 64009,
Name: "gogoproto.onlyone",
Tag: "varint,64009,opt,name=onlyone",
Filename: "gogo.proto",
}
var E_Equal = &proto.ExtensionDesc{
@ -346,6 +421,7 @@ var E_Equal = &proto.ExtensionDesc{
Field: 64013,
Name: "gogoproto.equal",
Tag: "varint,64013,opt,name=equal",
Filename: "gogo.proto",
}
var E_Description = &proto.ExtensionDesc{
@ -354,6 +430,7 @@ var E_Description = &proto.ExtensionDesc{
Field: 64014,
Name: "gogoproto.description",
Tag: "varint,64014,opt,name=description",
Filename: "gogo.proto",
}
var E_Testgen = &proto.ExtensionDesc{
@ -362,6 +439,7 @@ var E_Testgen = &proto.ExtensionDesc{
Field: 64015,
Name: "gogoproto.testgen",
Tag: "varint,64015,opt,name=testgen",
Filename: "gogo.proto",
}
var E_Benchgen = &proto.ExtensionDesc{
@ -370,6 +448,7 @@ var E_Benchgen = &proto.ExtensionDesc{
Field: 64016,
Name: "gogoproto.benchgen",
Tag: "varint,64016,opt,name=benchgen",
Filename: "gogo.proto",
}
var E_Marshaler = &proto.ExtensionDesc{
@ -378,6 +457,7 @@ var E_Marshaler = &proto.ExtensionDesc{
Field: 64017,
Name: "gogoproto.marshaler",
Tag: "varint,64017,opt,name=marshaler",
Filename: "gogo.proto",
}
var E_Unmarshaler = &proto.ExtensionDesc{
@ -386,6 +466,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{
Field: 64018,
Name: "gogoproto.unmarshaler",
Tag: "varint,64018,opt,name=unmarshaler",
Filename: "gogo.proto",
}
var E_StableMarshaler = &proto.ExtensionDesc{
@ -394,6 +475,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{
Field: 64019,
Name: "gogoproto.stable_marshaler",
Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
Filename: "gogo.proto",
}
var E_Sizer = &proto.ExtensionDesc{
@ -402,6 +484,7 @@ var E_Sizer = &proto.ExtensionDesc{
Field: 64020,
Name: "gogoproto.sizer",
Tag: "varint,64020,opt,name=sizer",
Filename: "gogo.proto",
}
var E_UnsafeMarshaler = &proto.ExtensionDesc{
@ -410,6 +493,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{
Field: 64023,
Name: "gogoproto.unsafe_marshaler",
Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
Filename: "gogo.proto",
}
var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
@ -418,6 +502,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
Field: 64024,
Name: "gogoproto.unsafe_unmarshaler",
Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
Filename: "gogo.proto",
}
var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
@ -426,6 +511,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
Field: 64025,
Name: "gogoproto.goproto_extensions_map",
Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
Filename: "gogo.proto",
}
var E_GoprotoUnrecognized = &proto.ExtensionDesc{
@ -434,6 +520,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{
Field: 64026,
Name: "gogoproto.goproto_unrecognized",
Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
Filename: "gogo.proto",
}
var E_Protosizer = &proto.ExtensionDesc{
@ -442,6 +529,7 @@ var E_Protosizer = &proto.ExtensionDesc{
Field: 64028,
Name: "gogoproto.protosizer",
Tag: "varint,64028,opt,name=protosizer",
Filename: "gogo.proto",
}
var E_Compare = &proto.ExtensionDesc{
@ -450,6 +538,16 @@ var E_Compare = &proto.ExtensionDesc{
Field: 64029,
Name: "gogoproto.compare",
Tag: "varint,64029,opt,name=compare",
Filename: "gogo.proto",
}
var E_Typedecl = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64030,
Name: "gogoproto.typedecl",
Tag: "varint,64030,opt,name=typedecl",
Filename: "gogo.proto",
}
var E_Nullable = &proto.ExtensionDesc{
@ -458,6 +556,7 @@ var E_Nullable = &proto.ExtensionDesc{
Field: 65001,
Name: "gogoproto.nullable",
Tag: "varint,65001,opt,name=nullable",
Filename: "gogo.proto",
}
var E_Embed = &proto.ExtensionDesc{
@ -466,6 +565,7 @@ var E_Embed = &proto.ExtensionDesc{
Field: 65002,
Name: "gogoproto.embed",
Tag: "varint,65002,opt,name=embed",
Filename: "gogo.proto",
}
var E_Customtype = &proto.ExtensionDesc{
@ -474,6 +574,7 @@ var E_Customtype = &proto.ExtensionDesc{
Field: 65003,
Name: "gogoproto.customtype",
Tag: "bytes,65003,opt,name=customtype",
Filename: "gogo.proto",
}
var E_Customname = &proto.ExtensionDesc{
@ -482,6 +583,7 @@ var E_Customname = &proto.ExtensionDesc{
Field: 65004,
Name: "gogoproto.customname",
Tag: "bytes,65004,opt,name=customname",
Filename: "gogo.proto",
}
var E_Jsontag = &proto.ExtensionDesc{
@ -490,6 +592,7 @@ var E_Jsontag = &proto.ExtensionDesc{
Field: 65005,
Name: "gogoproto.jsontag",
Tag: "bytes,65005,opt,name=jsontag",
Filename: "gogo.proto",
}
var E_Moretags = &proto.ExtensionDesc{
@ -498,6 +601,7 @@ var E_Moretags = &proto.ExtensionDesc{
Field: 65006,
Name: "gogoproto.moretags",
Tag: "bytes,65006,opt,name=moretags",
Filename: "gogo.proto",
}
var E_Casttype = &proto.ExtensionDesc{
@ -506,6 +610,7 @@ var E_Casttype = &proto.ExtensionDesc{
Field: 65007,
Name: "gogoproto.casttype",
Tag: "bytes,65007,opt,name=casttype",
Filename: "gogo.proto",
}
var E_Castkey = &proto.ExtensionDesc{
@ -514,6 +619,7 @@ var E_Castkey = &proto.ExtensionDesc{
Field: 65008,
Name: "gogoproto.castkey",
Tag: "bytes,65008,opt,name=castkey",
Filename: "gogo.proto",
}
var E_Castvalue = &proto.ExtensionDesc{
@ -522,6 +628,7 @@ var E_Castvalue = &proto.ExtensionDesc{
Field: 65009,
Name: "gogoproto.castvalue",
Tag: "bytes,65009,opt,name=castvalue",
Filename: "gogo.proto",
}
var E_Stdtime = &proto.ExtensionDesc{
@ -530,6 +637,7 @@ var E_Stdtime = &proto.ExtensionDesc{
Field: 65010,
Name: "gogoproto.stdtime",
Tag: "varint,65010,opt,name=stdtime",
Filename: "gogo.proto",
}
var E_Stdduration = &proto.ExtensionDesc{
@ -538,6 +646,7 @@ var E_Stdduration = &proto.ExtensionDesc{
Field: 65011,
Name: "gogoproto.stdduration",
Tag: "varint,65011,opt,name=stdduration",
Filename: "gogo.proto",
}
func init() {
@ -545,6 +654,7 @@ func init() {
proto.RegisterExtension(E_GoprotoEnumStringer)
proto.RegisterExtension(E_EnumStringer)
proto.RegisterExtension(E_EnumCustomname)
proto.RegisterExtension(E_Enumdecl)
proto.RegisterExtension(E_EnumvalueCustomname)
proto.RegisterExtension(E_GoprotoGettersAll)
proto.RegisterExtension(E_GoprotoEnumPrefixAll)
@ -572,6 +682,9 @@ func init() {
proto.RegisterExtension(E_GogoprotoImport)
proto.RegisterExtension(E_ProtosizerAll)
proto.RegisterExtension(E_CompareAll)
proto.RegisterExtension(E_TypedeclAll)
proto.RegisterExtension(E_EnumdeclAll)
proto.RegisterExtension(E_GoprotoRegistration)
proto.RegisterExtension(E_GoprotoGetters)
proto.RegisterExtension(E_GoprotoStringer)
proto.RegisterExtension(E_VerboseEqual)
@ -594,6 +707,7 @@ func init() {
proto.RegisterExtension(E_GoprotoUnrecognized)
proto.RegisterExtension(E_Protosizer)
proto.RegisterExtension(E_Compare)
proto.RegisterExtension(E_Typedecl)
proto.RegisterExtension(E_Nullable)
proto.RegisterExtension(E_Embed)
proto.RegisterExtension(E_Customtype)
@ -610,76 +724,81 @@ func init() {
func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
var fileDescriptorGogo = []byte{
// 1129 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45,
0x14, 0x87, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, 0xe4,
0x9c, 0x22, 0x94, 0xb2, 0x22, 0xcb, 0xb1, 0x9c, 0x51, 0x10, 0x86, 0x91, 0x89, 0x03, 0x88, 0xc3,
0xa8, 0x67, 0xa6, 0xdc, 0x69, 0xe8, 0xee, 0x6a, 0xba, 0xaa, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0,
0x02, 0x84, 0xdd, 0x37, 0x5f, 0x50, 0x75, 0xbf, 0xd7, 0x53, 0xdd, 0x1e, 0xa9, 0x6a, 0x6e, 0xe3,
0x71, 0x7d, 0xdf, 0x54, 0xbf, 0x37, 0xf5, 0x7e, 0x53, 0x00, 0xbe, 0xf0, 0xc5, 0x52, 0x92, 0x0a,
0x25, 0x9a, 0x0d, 0xfd, 0x3a, 0x7f, 0x79, 0xe8, 0xb0, 0x2f, 0x84, 0x1f, 0xf2, 0xa3, 0xf9, 0x5f,
0xdd, 0x6c, 0xfb, 0x68, 0x9f, 0xcb, 0x5e, 0x1a, 0x24, 0x4a, 0xa4, 0xc5, 0x62, 0x76, 0x3f, 0xcc,
0xe3, 0xe2, 0x0e, 0x8f, 0xb3, 0xa8, 0x93, 0xa4, 0x7c, 0x3b, 0xb8, 0xd0, 0xbc, 0x63, 0xa9, 0x20,
0x97, 0x88, 0x5c, 0x5a, 0x8f, 0xb3, 0xe8, 0x81, 0x44, 0x05, 0x22, 0x96, 0x07, 0xaf, 0xff, 0x72,
0xf3, 0xe1, 0x9b, 0xee, 0x9e, 0xd8, 0x9c, 0x43, 0x54, 0xff, 0xaf, 0x9d, 0x83, 0x6c, 0x13, 0x6e,
0xad, 0xf8, 0xa4, 0x4a, 0x83, 0xd8, 0xe7, 0xa9, 0xc5, 0xf8, 0x03, 0x1a, 0xe7, 0x0d, 0xe3, 0x83,
0x88, 0xb2, 0x35, 0x98, 0x1e, 0xc5, 0xf5, 0x23, 0xba, 0xa6, 0xb8, 0x29, 0x69, 0xc1, 0x6c, 0x2e,
0xe9, 0x65, 0x52, 0x89, 0x28, 0xf6, 0x22, 0x6e, 0xd1, 0xfc, 0x94, 0x6b, 0x1a, 0x9b, 0x33, 0x1a,
0x5b, 0x2b, 0x29, 0x76, 0x16, 0x16, 0xf4, 0x3b, 0xe7, 0xbd, 0x30, 0xe3, 0xa6, 0xed, 0xc8, 0x50,
0xdb, 0x59, 0xbd, 0x8c, 0x94, 0x3f, 0x5f, 0x1a, 0xcb, 0x95, 0xf3, 0xa5, 0xc0, 0xf0, 0x1a, 0x9d,
0xf0, 0xb9, 0x52, 0x3c, 0x95, 0x1d, 0x2f, 0x0c, 0x87, 0x6c, 0xf2, 0x54, 0x10, 0x96, 0xc6, 0xcb,
0x37, 0xaa, 0x9d, 0x68, 0x15, 0xe4, 0x6a, 0x18, 0xb2, 0x2d, 0xb8, 0x6d, 0x48, 0x67, 0x1d, 0x9c,
0x57, 0xd0, 0xb9, 0xb0, 0xaf, 0xbb, 0x5a, 0xdb, 0x06, 0x7a, 0xbf, 0xec, 0x87, 0x83, 0xf3, 0x2d,
0x74, 0x36, 0x91, 0xa5, 0xb6, 0x68, 0xe3, 0xbd, 0x30, 0x77, 0x9e, 0xa7, 0x5d, 0x21, 0x79, 0x87,
0x3f, 0x91, 0x79, 0xa1, 0x83, 0xee, 0x2a, 0xea, 0x66, 0x11, 0x5c, 0xd7, 0x9c, 0x76, 0x1d, 0x87,
0x89, 0x6d, 0xaf, 0xc7, 0x1d, 0x14, 0xd7, 0x50, 0x31, 0xae, 0xd7, 0x6b, 0x74, 0x15, 0xa6, 0x7c,
0x51, 0x3c, 0x92, 0x03, 0xfe, 0x36, 0xe2, 0x93, 0xc4, 0xa0, 0x22, 0x11, 0x49, 0x16, 0x7a, 0xca,
0x65, 0x07, 0xef, 0x90, 0x82, 0x18, 0x54, 0x8c, 0x50, 0xd6, 0x77, 0x49, 0x21, 0x8d, 0x7a, 0xae,
0xc0, 0xa4, 0x88, 0xc3, 0x1d, 0x11, 0xbb, 0x6c, 0xe2, 0x3d, 0x34, 0x00, 0x22, 0x5a, 0xb0, 0x0c,
0x0d, 0xd7, 0x46, 0xbc, 0x8f, 0xf8, 0x04, 0xa7, 0x0e, 0xb4, 0x60, 0x96, 0x86, 0x4c, 0x20, 0x62,
0x07, 0xc5, 0x07, 0xa8, 0x98, 0x31, 0x30, 0x7c, 0x0c, 0xc5, 0xa5, 0xf2, 0xb9, 0x8b, 0xe4, 0x43,
0x7a, 0x0c, 0x44, 0xb0, 0x94, 0x5d, 0x1e, 0xf7, 0xce, 0xb9, 0x19, 0x3e, 0xa2, 0x52, 0x12, 0xa3,
0x15, 0x6b, 0x30, 0x1d, 0x79, 0xa9, 0x3c, 0xe7, 0x85, 0x4e, 0xed, 0xf8, 0x18, 0x1d, 0x53, 0x25,
0x84, 0x15, 0xc9, 0xe2, 0x51, 0x34, 0x9f, 0x50, 0x45, 0x0c, 0x0c, 0x8f, 0x9e, 0x54, 0x5e, 0x37,
0xe4, 0x9d, 0x51, 0x6c, 0x9f, 0xd2, 0xd1, 0x2b, 0xd8, 0x0d, 0xd3, 0xb8, 0x0c, 0x0d, 0x19, 0x5c,
0x74, 0xd2, 0x7c, 0x46, 0x9d, 0xce, 0x01, 0x0d, 0x3f, 0x02, 0xb7, 0x0f, 0x1d, 0xf5, 0x0e, 0xb2,
0xcf, 0x51, 0xb6, 0x38, 0x64, 0xdc, 0xe3, 0x48, 0x18, 0x55, 0xf9, 0x05, 0x8d, 0x04, 0x5e, 0x73,
0xb5, 0x61, 0x21, 0x8b, 0xa5, 0xb7, 0x3d, 0x5a, 0xd5, 0xbe, 0xa4, 0xaa, 0x15, 0x6c, 0xa5, 0x6a,
0x67, 0x60, 0x11, 0x8d, 0xa3, 0xf5, 0xf5, 0x2b, 0x1a, 0xac, 0x05, 0xbd, 0x55, 0xed, 0xee, 0xa3,
0x70, 0xa8, 0x2c, 0xe7, 0x05, 0xc5, 0x63, 0xa9, 0x99, 0x4e, 0xe4, 0x25, 0x0e, 0xe6, 0xeb, 0x68,
0xa6, 0x89, 0xbf, 0x5e, 0x0a, 0x36, 0xbc, 0x44, 0xcb, 0x1f, 0x86, 0x83, 0x24, 0xcf, 0xe2, 0x94,
0xf7, 0x84, 0x1f, 0x07, 0x17, 0x79, 0xdf, 0x41, 0xfd, 0x75, 0xad, 0x55, 0x5b, 0x06, 0xae, 0xcd,
0xa7, 0xe1, 0x96, 0xf2, 0xf7, 0x46, 0x27, 0x88, 0x12, 0x91, 0x2a, 0x8b, 0xf1, 0x1b, 0xea, 0x54,
0xc9, 0x9d, 0xce, 0x31, 0xb6, 0x0e, 0x33, 0xf9, 0x9f, 0xae, 0x5f, 0xc9, 0x6f, 0x51, 0x34, 0x3d,
0xa0, 0x70, 0x70, 0xf4, 0x44, 0x94, 0x78, 0xa9, 0xcb, 0xfc, 0xfb, 0x8e, 0x06, 0x07, 0x22, 0xc5,
0xb7, 0x6f, 0xb6, 0x96, 0xc4, 0xcd, 0xbb, 0xf6, 0x49, 0x36, 0xb8, 0x94, 0x9e, 0x5f, 0x7a, 0x9e,
0xdc, 0xc5, 0x33, 0x5b, 0x0d, 0x62, 0x76, 0x9f, 0x2e, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x4b, 0xbb,
0x65, 0x85, 0x2a, 0x69, 0xc9, 0x4e, 0xc1, 0x74, 0x25, 0x2a, 0xed, 0xaa, 0xa7, 0x50, 0x35, 0x65,
0x26, 0x25, 0x3b, 0x06, 0x63, 0x3a, 0xf6, 0xec, 0xf8, 0xd3, 0x88, 0xe7, 0xcb, 0xd9, 0x09, 0x98,
0xa0, 0xb8, 0xb3, 0xa3, 0xcf, 0x20, 0x5a, 0x22, 0x1a, 0xa7, 0xa8, 0xb3, 0xe3, 0xcf, 0x12, 0x4e,
0x88, 0xc6, 0xdd, 0x4b, 0xf8, 0xfd, 0xf3, 0x63, 0x38, 0xae, 0xa8, 0x76, 0xcb, 0x30, 0x8e, 0x19,
0x67, 0xa7, 0x9f, 0xc3, 0x0f, 0x27, 0x82, 0xdd, 0x03, 0x07, 0x1c, 0x0b, 0xfe, 0x02, 0xa2, 0xc5,
0x7a, 0xb6, 0x06, 0x93, 0x46, 0xae, 0xd9, 0xf1, 0x17, 0x11, 0x37, 0x29, 0xbd, 0x75, 0xcc, 0x35,
0xbb, 0xe0, 0x25, 0xda, 0x3a, 0x12, 0xba, 0x6c, 0x14, 0x69, 0x76, 0xfa, 0x65, 0xaa, 0x3a, 0x21,
0x6c, 0x05, 0x1a, 0xe5, 0x98, 0xb2, 0xf3, 0xaf, 0x20, 0x3f, 0x60, 0x74, 0x05, 0x8c, 0x31, 0x69,
0x57, 0xbc, 0x4a, 0x15, 0x30, 0x28, 0x7d, 0x8c, 0xea, 0xd1, 0x67, 0x37, 0xbd, 0x46, 0xc7, 0xa8,
0x96, 0x7c, 0xba, 0x9b, 0xf9, 0xb4, 0xb0, 0x2b, 0x5e, 0xa7, 0x6e, 0xe6, 0xeb, 0xf5, 0x36, 0xea,
0x59, 0x62, 0x77, 0xbc, 0x41, 0xdb, 0xa8, 0x45, 0x09, 0x6b, 0x43, 0x73, 0x7f, 0x8e, 0xd8, 0x7d,
0x6f, 0xa2, 0x6f, 0x6e, 0x5f, 0x8c, 0xb0, 0x87, 0x60, 0x71, 0x78, 0x86, 0xd8, 0xad, 0x97, 0x77,
0x6b, 0xbf, 0xfa, 0xcd, 0x08, 0x61, 0x67, 0x06, 0xbf, 0xfa, 0xcd, 0xfc, 0xb0, 0x6b, 0xaf, 0xec,
0x56, 0x2f, 0x76, 0x66, 0x7c, 0xb0, 0x55, 0x80, 0xc1, 0xe8, 0xb6, 0xbb, 0xae, 0xa2, 0xcb, 0x80,
0xf4, 0xd1, 0xc0, 0xc9, 0x6d, 0xe7, 0xaf, 0xd1, 0xd1, 0x40, 0x82, 0x2d, 0xc3, 0x44, 0x9c, 0x85,
0xa1, 0xfe, 0x72, 0x34, 0xef, 0x1c, 0x12, 0x13, 0x3c, 0xec, 0x13, 0xfb, 0xeb, 0x1e, 0x1e, 0x0c,
0x02, 0xd8, 0x31, 0x38, 0xc0, 0xa3, 0x2e, 0xef, 0xdb, 0xc8, 0xdf, 0xf6, 0x68, 0x20, 0xe8, 0xd5,
0x6c, 0x05, 0xa0, 0xb8, 0x34, 0xaa, 0x9d, 0xc4, 0xfa, 0xa9, 0xbf, 0xef, 0x15, 0x77, 0x50, 0x03,
0x19, 0x08, 0xf2, 0x5b, 0xa7, 0x45, 0x70, 0xa3, 0x2a, 0xc8, 0x2f, 0x9a, 0xc7, 0x61, 0xfc, 0x31,
0x29, 0x62, 0xe5, 0xf9, 0x36, 0xfa, 0x0f, 0xa4, 0x69, 0xbd, 0x2e, 0x58, 0x24, 0x52, 0xae, 0x3c,
0x5f, 0xda, 0xd8, 0x3f, 0x91, 0x2d, 0x01, 0x0d, 0xf7, 0x3c, 0xa9, 0x5c, 0x9e, 0xfb, 0x2f, 0x82,
0x09, 0xd0, 0x9b, 0xd6, 0xaf, 0x1f, 0xe7, 0x3b, 0x36, 0xf6, 0x6f, 0xda, 0x34, 0xae, 0x67, 0x27,
0xa0, 0xa1, 0x5f, 0xe6, 0xf7, 0x6d, 0x1b, 0xfc, 0x0f, 0xc2, 0x03, 0x42, 0x7f, 0xb2, 0x54, 0x7d,
0x15, 0xd8, 0x8b, 0xfd, 0x2f, 0x76, 0x9a, 0xd6, 0xb3, 0x55, 0x98, 0x94, 0xaa, 0xdf, 0xcf, 0x52,
0x2f, 0x1f, 0xfe, 0x16, 0xfc, 0xbf, 0xbd, 0xf2, 0x32, 0x57, 0x32, 0x27, 0x8f, 0xc0, 0x7c, 0x4f,
0x44, 0x75, 0xf0, 0x24, 0xb4, 0x44, 0x4b, 0xb4, 0xf3, 0x63, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff,
0xff, 0x3f, 0x9b, 0x2b, 0x54, 0xfc, 0x11, 0x00, 0x00,
// 1201 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xcb, 0x6f, 0x1c, 0x45,
0x13, 0xc0, 0xf5, 0xe9, 0x73, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3,
0xe4, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x11, 0x06, 0x63, 0xe2, 0x00, 0xe2,
0xb0, 0x9a, 0xdd, 0x6d, 0x4f, 0x06, 0x66, 0xa6, 0x87, 0x99, 0x9e, 0x28, 0xce, 0x0d, 0x85, 0x87,
0x10, 0xe2, 0x8d, 0x04, 0x09, 0x49, 0x80, 0x03, 0xef, 0x67, 0x78, 0x1f, 0xb9, 0xf0, 0xb8, 0xf2,
0x3f, 0x70, 0x01, 0xcc, 0xdb, 0x37, 0x5f, 0x50, 0xcd, 0x56, 0xcd, 0xf6, 0xac, 0x57, 0xea, 0xde,
0xdb, 0xec, 0xba, 0x7f, 0xbf, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x31, 0x80, 0xaf, 0x7c, 0x35, 0x97,
0xa4, 0x4a, 0xab, 0x7a, 0x0d, 0xaf, 0x8b, 0xcb, 0x03, 0x07, 0x7d, 0xa5, 0xfc, 0x50, 0x1e, 0x2e,
0x3e, 0x35, 0xf3, 0xcd, 0xc3, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77,
0xc1, 0x34, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0x37, 0x83, 0xf3, 0xf5, 0x5b, 0xe6,
0x3a, 0xe4, 0x1c, 0x93, 0x73, 0xcb, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0xfe, 0xeb,
0x3f, 0xff, 0xff, 0xe0, 0xff, 0x6e, 0x1f, 0x59, 0x9f, 0x22, 0x14, 0xff, 0xb6, 0x56, 0x80, 0x62,
0x1d, 0x6e, 0xac, 0xf8, 0x32, 0x9d, 0x06, 0xb1, 0x2f, 0x53, 0x8b, 0xf1, 0x3b, 0x32, 0x4e, 0x1b,
0xc6, 0x7b, 0x09, 0x15, 0x4b, 0x30, 0x3e, 0x88, 0xeb, 0x7b, 0x72, 0x8d, 0x49, 0x53, 0xb2, 0x02,
0x93, 0x85, 0xa4, 0x95, 0x67, 0x5a, 0x45, 0xb1, 0x17, 0x49, 0x8b, 0xe6, 0x87, 0x42, 0x53, 0x5b,
0x9f, 0x40, 0x6c, 0xa9, 0xa4, 0x84, 0x80, 0x11, 0xfc, 0xa6, 0x2d, 0x5b, 0xa1, 0xc5, 0xf0, 0x23,
0x05, 0x52, 0xae, 0x17, 0x67, 0x60, 0x06, 0xaf, 0xcf, 0x79, 0x61, 0x2e, 0xcd, 0x48, 0x0e, 0xf5,
0xf5, 0x9c, 0xc1, 0x65, 0x2c, 0xfb, 0xe9, 0xe2, 0x50, 0x11, 0xce, 0x74, 0x29, 0x30, 0x62, 0x32,
0xaa, 0xe8, 0x4b, 0xad, 0x65, 0x9a, 0x35, 0xbc, 0xb0, 0x5f, 0x78, 0x27, 0x82, 0xb0, 0x34, 0x5e,
0xda, 0xae, 0x56, 0x71, 0xa5, 0x43, 0x2e, 0x86, 0xa1, 0xd8, 0x80, 0x9b, 0xfa, 0x3c, 0x15, 0x0e,
0xce, 0xcb, 0xe4, 0x9c, 0xd9, 0xf3, 0x64, 0xa0, 0x76, 0x0d, 0xf8, 0xfb, 0xb2, 0x96, 0x0e, 0xce,
0xd7, 0xc8, 0x59, 0x27, 0x96, 0x4b, 0x8a, 0xc6, 0x53, 0x30, 0x75, 0x4e, 0xa6, 0x4d, 0x95, 0xc9,
0x86, 0x7c, 0x24, 0xf7, 0x42, 0x07, 0xdd, 0x15, 0xd2, 0x4d, 0x12, 0xb8, 0x8c, 0x1c, 0xba, 0x8e,
0xc2, 0xc8, 0xa6, 0xd7, 0x92, 0x0e, 0x8a, 0xab, 0xa4, 0x18, 0xc6, 0xf5, 0x88, 0x2e, 0xc2, 0x98,
0xaf, 0x3a, 0xb7, 0xe4, 0x80, 0x5f, 0x23, 0x7c, 0x94, 0x19, 0x52, 0x24, 0x2a, 0xc9, 0x43, 0x4f,
0xbb, 0x44, 0xf0, 0x3a, 0x2b, 0x98, 0x21, 0xc5, 0x00, 0x69, 0x7d, 0x83, 0x15, 0x99, 0x91, 0xcf,
0x05, 0x18, 0x55, 0x71, 0xb8, 0xa5, 0x62, 0x97, 0x20, 0xde, 0x24, 0x03, 0x10, 0x82, 0x82, 0x79,
0xa8, 0xb9, 0x16, 0xe2, 0xad, 0x6d, 0xde, 0x1e, 0x5c, 0x81, 0x15, 0x98, 0xe4, 0x06, 0x15, 0xa8,
0xd8, 0x41, 0xf1, 0x36, 0x29, 0x26, 0x0c, 0x8c, 0x6e, 0x43, 0xcb, 0x4c, 0xfb, 0xd2, 0x45, 0xf2,
0x0e, 0xdf, 0x06, 0x21, 0x94, 0xca, 0xa6, 0x8c, 0x5b, 0x67, 0xdd, 0x0c, 0xef, 0x72, 0x2a, 0x99,
0x41, 0xc5, 0x12, 0x8c, 0x47, 0x5e, 0x9a, 0x9d, 0xf5, 0x42, 0xa7, 0x72, 0xbc, 0x47, 0x8e, 0xb1,
0x12, 0xa2, 0x8c, 0xe4, 0xf1, 0x20, 0x9a, 0xf7, 0x39, 0x23, 0x06, 0x46, 0x5b, 0x2f, 0xd3, 0x5e,
0x33, 0x94, 0x8d, 0x41, 0x6c, 0x1f, 0xf0, 0xd6, 0xeb, 0xb0, 0xab, 0xa6, 0x71, 0x1e, 0x6a, 0x59,
0x70, 0xc1, 0x49, 0xf3, 0x21, 0x57, 0xba, 0x00, 0x10, 0x7e, 0x00, 0x6e, 0xee, 0x3b, 0x26, 0x1c,
0x64, 0x1f, 0x91, 0x6c, 0xb6, 0xcf, 0xa8, 0xa0, 0x96, 0x30, 0xa8, 0xf2, 0x63, 0x6e, 0x09, 0xb2,
0xc7, 0xb5, 0x06, 0x33, 0x79, 0x9c, 0x79, 0x9b, 0x83, 0x65, 0xed, 0x13, 0xce, 0x5a, 0x87, 0xad,
0x64, 0xed, 0x34, 0xcc, 0x92, 0x71, 0xb0, 0xba, 0x7e, 0xca, 0x8d, 0xb5, 0x43, 0x6f, 0x54, 0xab,
0xfb, 0x20, 0x1c, 0x28, 0xd3, 0x79, 0x5e, 0xcb, 0x38, 0x43, 0xa6, 0x11, 0x79, 0x89, 0x83, 0xf9,
0x3a, 0x99, 0xb9, 0xe3, 0x2f, 0x97, 0x82, 0x55, 0x2f, 0x41, 0xf9, 0xfd, 0xb0, 0x9f, 0xe5, 0x79,
0x9c, 0xca, 0x96, 0xf2, 0xe3, 0xe0, 0x82, 0x6c, 0x3b, 0xa8, 0x3f, 0xeb, 0x29, 0xd5, 0x86, 0x81,
0xa3, 0xf9, 0x24, 0xdc, 0x50, 0x9e, 0x55, 0x1a, 0x41, 0x94, 0xa8, 0x54, 0x5b, 0x8c, 0x9f, 0x73,
0xa5, 0x4a, 0xee, 0x64, 0x81, 0x89, 0x65, 0x98, 0x28, 0x3e, 0xba, 0x3e, 0x92, 0x5f, 0x90, 0x68,
0xbc, 0x4b, 0x51, 0xe3, 0x68, 0xa9, 0x28, 0xf1, 0x52, 0x97, 0xfe, 0xf7, 0x25, 0x37, 0x0e, 0x42,
0xa8, 0x71, 0xe8, 0xad, 0x44, 0xe2, 0xb4, 0x77, 0x30, 0x7c, 0xc5, 0x8d, 0x83, 0x19, 0x52, 0xf0,
0x81, 0xc1, 0x41, 0xf1, 0x35, 0x2b, 0x98, 0x41, 0xc5, 0x3d, 0xdd, 0x41, 0x9b, 0x4a, 0x3f, 0xc8,
0x74, 0xea, 0xe1, 0x6a, 0x8b, 0xea, 0x9b, 0xed, 0xea, 0x21, 0x6c, 0xdd, 0x40, 0xc5, 0x29, 0x98,
0xec, 0x39, 0x62, 0xd4, 0x6f, 0xdb, 0x63, 0x5b, 0x95, 0x59, 0xe6, 0xf9, 0xa5, 0xf0, 0xd1, 0x1d,
0x6a, 0x46, 0xd5, 0x13, 0x86, 0xb8, 0x13, 0xeb, 0x5e, 0x3d, 0x07, 0xd8, 0x65, 0x17, 0x77, 0xca,
0xd2, 0x57, 0x8e, 0x01, 0xe2, 0x04, 0x8c, 0x57, 0xce, 0x00, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x33,
0x8f, 0x00, 0xe2, 0x08, 0x0c, 0xe1, 0x3c, 0xb7, 0xe3, 0x8f, 0x13, 0x5e, 0x2c, 0x17, 0xc7, 0x60,
0x84, 0xe7, 0xb8, 0x1d, 0x7d, 0x82, 0xd0, 0x12, 0x41, 0x9c, 0x67, 0xb8, 0x1d, 0x7f, 0x92, 0x71,
0x46, 0x10, 0x77, 0x4f, 0xe1, 0xb7, 0x4f, 0x0f, 0x51, 0x1f, 0xe6, 0xdc, 0xcd, 0xc3, 0x30, 0x0d,
0x6f, 0x3b, 0xfd, 0x14, 0xfd, 0x38, 0x13, 0xe2, 0x0e, 0xd8, 0xe7, 0x98, 0xf0, 0x67, 0x08, 0xed,
0xac, 0x17, 0x4b, 0x30, 0x6a, 0x0c, 0x6c, 0x3b, 0xfe, 0x2c, 0xe1, 0x26, 0x85, 0xa1, 0xd3, 0xc0,
0xb6, 0x0b, 0x9e, 0xe3, 0xd0, 0x89, 0xc0, 0xb4, 0xf1, 0xac, 0xb6, 0xd3, 0xcf, 0x73, 0xd6, 0x19,
0x11, 0x0b, 0x50, 0x2b, 0xfb, 0xaf, 0x9d, 0x7f, 0x81, 0xf8, 0x2e, 0x83, 0x19, 0x30, 0xfa, 0xbf,
0x5d, 0xf1, 0x22, 0x67, 0xc0, 0xa0, 0x70, 0x1b, 0xf5, 0xce, 0x74, 0xbb, 0xe9, 0x25, 0xde, 0x46,
0x3d, 0x23, 0x1d, 0xab, 0x59, 0xb4, 0x41, 0xbb, 0xe2, 0x65, 0xae, 0x66, 0xb1, 0x1e, 0xc3, 0xe8,
0x1d, 0x92, 0x76, 0xc7, 0x2b, 0x1c, 0x46, 0xcf, 0x8c, 0x14, 0x6b, 0x50, 0xdf, 0x3b, 0x20, 0xed,
0xbe, 0x57, 0xc9, 0x37, 0xb5, 0x67, 0x3e, 0x8a, 0xfb, 0x60, 0xb6, 0xff, 0x70, 0xb4, 0x5b, 0x2f,
0xed, 0xf4, 0xbc, 0xce, 0x98, 0xb3, 0x51, 0x9c, 0xee, 0x76, 0x59, 0x73, 0x30, 0xda, 0xb5, 0x97,
0x77, 0xaa, 0x8d, 0xd6, 0x9c, 0x8b, 0x62, 0x11, 0xa0, 0x3b, 0x93, 0xec, 0xae, 0x2b, 0xe4, 0x32,
0x20, 0xdc, 0x1a, 0x34, 0x92, 0xec, 0xfc, 0x55, 0xde, 0x1a, 0x44, 0xe0, 0xd6, 0xe0, 0x69, 0x64,
0xa7, 0xaf, 0xf1, 0xd6, 0x60, 0x44, 0xcc, 0xc3, 0x48, 0x9c, 0x87, 0x21, 0x3e, 0x5b, 0xf5, 0x5b,
0xfb, 0x8c, 0x1b, 0x19, 0xb6, 0x19, 0xfe, 0x65, 0x97, 0x60, 0x06, 0xc4, 0x11, 0xd8, 0x27, 0xa3,
0xa6, 0x6c, 0xdb, 0xc8, 0x5f, 0x77, 0xb9, 0x9f, 0xe0, 0x6a, 0xb1, 0x00, 0xd0, 0x79, 0x99, 0xc6,
0x28, 0x6c, 0xec, 0x6f, 0xbb, 0x9d, 0xf7, 0x7a, 0x03, 0xe9, 0x0a, 0x8a, 0xb7, 0x71, 0x8b, 0x60,
0xbb, 0x2a, 0x28, 0x5e, 0xc0, 0x8f, 0xc2, 0xf0, 0x43, 0x99, 0x8a, 0xb5, 0xe7, 0xdb, 0xe8, 0xdf,
0x89, 0xe6, 0xf5, 0x98, 0xb0, 0x48, 0xa5, 0x52, 0x7b, 0x7e, 0x66, 0x63, 0xff, 0x20, 0xb6, 0x04,
0x10, 0x6e, 0x79, 0x99, 0x76, 0xb9, 0xef, 0x3f, 0x19, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x87, 0xe5,
0x96, 0x8d, 0xfd, 0x8b, 0x83, 0xa6, 0xf5, 0xe2, 0x18, 0xd4, 0xf0, 0xb2, 0xf8, 0x3f, 0x84, 0x0d,
0xfe, 0x9b, 0xe0, 0x2e, 0x81, 0xbf, 0x9c, 0xe9, 0xb6, 0x0e, 0xec, 0xc9, 0xfe, 0x87, 0x2a, 0xcd,
0xeb, 0xc5, 0x22, 0x8c, 0x66, 0xba, 0xdd, 0xce, 0xe9, 0x44, 0x63, 0xc1, 0xff, 0xdd, 0x2d, 0x5f,
0x72, 0x4b, 0xe6, 0xf8, 0x21, 0x98, 0x6e, 0xa9, 0xa8, 0x17, 0x3c, 0x0e, 0x2b, 0x6a, 0x45, 0xad,
0x15, 0xbb, 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x9c, 0xec, 0xd8, 0x50, 0x13, 0x00,
0x00,
}

View File

@ -90,6 +90,14 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
return false
}
func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
}
func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
}
func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
if field == nil {
return ""
@ -343,3 +351,7 @@ func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
}
func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
}
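
The three helpers added above follow the same pattern as the existing gogoproto option accessors: a per-declaration option overrides the file-wide *_all default. A minimal illustrative sketch of how a generator plugin might consult them (import paths and the plugin wiring are assumptions, not part of this diff):

package genexample

import (
    "fmt"

    "github.com/gogo/protobuf/gogoproto"
    descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// reportDecls prints which declarations a generator would emit for the given
// file, message, and enum descriptors, using the helpers from this diff.
func reportDecls(fd *descriptor.FileDescriptorProto, msg *descriptor.DescriptorProto, enum *descriptor.EnumDescriptorProto) {
    fmt.Println("emit Go type for message:", gogoproto.HasTypeDecl(fd, msg))
    fmt.Println("emit Go type for enum:", gogoproto.HasEnumDecl(fd, enum))
    fmt.Println("emit golang/protobuf registration:", gogoproto.RegistersGolangProto(fd))
}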

View File

@ -98,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) {
if v == nil {
return
}
structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v)))
}
func setCustomType(base structPointer, f field, value interface{}) {
@ -165,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error
}
newBas := appendStructPointer(base, p.field, p.ctype)
setCustomType(newBas, 0, custom)
var zero field
setCustomType(newBas, zero, custom)
return nil
}

View File

@ -84,7 +84,8 @@ func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
setPtrCustomType(newBas, 0, &d)
var zero field
setPtrCustomType(newBas, zero, &d)
return nil
}

View File

@ -1075,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
exts := structPointer_Extensions(base, p.field)
if err := encodeExtensions(exts); err != nil {
v, mu := exts.extensionsRead()
if v == nil {
return nil
}
mu.Lock()
defer mu.Unlock()
if err := encodeExtensionsMap(v); err != nil {
return err
}
v, _ := exts.extensionsRead()
return o.enc_map_body(v)
}

View File

@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int {
// Encode a slice of references to message struct pointers ([]struct).
func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
var state errorState
ss := structPointer_GetStructPointer(base, p.field)
ss1 := structPointer_GetRefStructPointer(ss, field(0))
size := p.stype.Size()
l := structPointer_Len(base, p.field)
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
l := ss.Len()
for i := 0; i < l; i++ {
structp := structPointer_Add(ss1, field(uintptr(i)*size))
structp := ss.Index(i)
if structPointer_IsNil(structp) {
return errRepeatedHasNil
}
@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer)
//TODO this is only copied, please fix this
func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
ss := structPointer_GetStructPointer(base, p.field)
ss1 := structPointer_GetRefStructPointer(ss, field(0))
size := p.stype.Size()
l := structPointer_Len(base, p.field)
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
l := ss.Len()
n += l * len(p.tagcode)
for i := 0; i < l; i++ {
structp := structPointer_Add(ss1, field(uintptr(i)*size))
structp := ss.Index(i)
if structPointer_IsNil(structp) {
return // return the size up to this point
}

View File

@ -167,6 +167,7 @@ type ExtensionDesc struct {
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {

View File

@ -0,0 +1,85 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
package proto
import (
"reflect"
)
func structPointer_FieldPointer(p structPointer, f field) structPointer {
panic("not implemented")
}
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
panic("not implemented")
}
func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
panic("not implemented")
}
func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
panic("not implemented")
}
func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
panic("not implemented")
}
func structPointer_Add(p structPointer, size field) structPointer {
panic("not implemented")
}
func structPointer_Len(p structPointer, f field) int {
panic("not implemented")
}
func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
panic("not implemented")
}
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
panic("not implemented")
}
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
panic("not implemented")
}
type structRefSlice struct{}
func (v *structRefSlice) Len() int {
panic("not implemented")
}
func (v *structRefSlice) Index(i int) structPointer {
panic("not implemented")
}

View File

@ -26,7 +26,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine
// +build !appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@ -105,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer {
func structPointer_Len(p structPointer, f field) int {
return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
}
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
return &structRefSlice{p: p, f: f, size: size}
}
// A structRefSlice represents a slice of structs (themselves submessages or groups).
type structRefSlice struct {
p structPointer
f field
size uintptr
}
func (v *structRefSlice) Len() int {
return structPointer_Len(v.p, v.f)
}
func (v *structRefSlice) Index(i int) structPointer {
ss := structPointer_GetStructPointer(v.p, v.f)
ss1 := structPointer_GetRefStructPointer(ss, 0)
return structPointer_Add(ss1, field(uintptr(i)*v.size))
}

View File

@ -84,7 +84,8 @@ func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
setPtrCustomType(newBas, 0, &t)
var zero field
setPtrCustomType(newBas, zero, &t)
return nil
}
@ -94,7 +95,8 @@ func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
setCustomType(newBas, 0, &t)
var zero field
setCustomType(newBas, zero, &t)
return nil
}

View File

@ -0,0 +1,118 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package descriptor provides functions for obtaining protocol buffer
// descriptors for generated Go types.
//
// These functions cannot go in package proto because they depend on the
// generated protobuf descriptor messages, which themselves depend on proto.
package descriptor
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"github.com/gogo/protobuf/proto"
)
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
func extractFile(gz []byte) (*FileDescriptorProto, error) {
r, err := gzip.NewReader(bytes.NewReader(gz))
if err != nil {
return nil, fmt.Errorf("failed to open gzip reader: %v", err)
}
defer r.Close()
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
}
fd := new(FileDescriptorProto)
if err := proto.Unmarshal(b, fd); err != nil {
return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
}
return fd, nil
}
// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
proto.Message
Descriptor() ([]byte, []int)
}
// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
gz, path := msg.Descriptor()
fd, err := extractFile(gz)
if err != nil {
panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
}
md = fd.MessageType[path[0]]
for _, i := range path[1:] {
md = md.NestedType[i]
}
return fd, md
}
// Is this field a scalar numeric type?
func (field *FieldDescriptorProto) IsScalar() bool {
if field.Type == nil {
return false
}
switch *field.Type {
case FieldDescriptorProto_TYPE_DOUBLE,
FieldDescriptorProto_TYPE_FLOAT,
FieldDescriptorProto_TYPE_INT64,
FieldDescriptorProto_TYPE_UINT64,
FieldDescriptorProto_TYPE_INT32,
FieldDescriptorProto_TYPE_FIXED64,
FieldDescriptorProto_TYPE_FIXED32,
FieldDescriptorProto_TYPE_BOOL,
FieldDescriptorProto_TYPE_UINT32,
FieldDescriptorProto_TYPE_ENUM,
FieldDescriptorProto_TYPE_SFIXED32,
FieldDescriptorProto_TYPE_SFIXED64,
FieldDescriptorProto_TYPE_SINT32,
FieldDescriptorProto_TYPE_SINT64:
return true
default:
return false
}
}
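
ForMessage is meant to be called with a generated message type; a hedged usage sketch (pb.Greeting and its import path are hypothetical stand-ins for any generated type):

package inspectexample

import (
    "fmt"

    descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

    pb "example.com/hello/pb" // hypothetical generated package
)

// dumpFields lists the fields of the hypothetical pb.Greeting message and
// whether each is a scalar, using the descriptors recovered by ForMessage.
func dumpFields() {
    fd, md := descriptor.ForMessage(&pb.Greeting{})
    fmt.Println("defined in:", fd.GetName())
    for _, f := range md.GetField() {
        fmt.Println(f.GetName(), "scalar:", f.IsScalar())
    }
}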

View File

@ -1942,7 +1942,7 @@ func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) }
var fileDescriptorDescriptor = []byte{
// 2273 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
0x15, 0x5f, 0xea, 0xcb, 0xd2, 0x93, 0x2c, 0x8f, 0xc7, 0xde, 0x84, 0x71, 0x36, 0x1b, 0x47, 0x9b,
0x34, 0x4e, 0xd2, 0x3a, 0x0b, 0xe7, 0x63, 0xb3, 0xde, 0x62, 0x0b, 0x59, 0x62, 0xbc, 0x0a, 0x64,
0x4b, 0xa5, 0xec, 0x36, 0xbb, 0x3d, 0x10, 0x63, 0x72, 0x24, 0x33, 0xa1, 0x86, 0x2c, 0x49, 0x25,

View File

@ -99,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
return x
}
func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
packed := field.IsPacked3()
wireType := field.WireType()
fieldNumber := field.GetNumber()
if packed {
wireType = 2
}
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
return x
}
func (field *FieldDescriptorProto) GetKey() []byte {
x := field.GetKeyUint64()
i := 0
@ -111,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte {
return keybuf
}
func (field *FieldDescriptorProto) GetKey3() []byte {
x := field.GetKey3Uint64()
i := 0
keybuf := make([]byte, 0)
for i = 0; x > 127; i++ {
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
x >>= 7
}
keybuf = append(keybuf, uint8(x))
return keybuf
}
func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
msg := desc.GetMessage(packageName, messageName)
if msg == nil {
@ -352,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool {
return f.Options != nil && f.GetOptions().GetPacked()
}
func (f *FieldDescriptorProto) IsPacked3() bool {
if f.IsRepeated() && f.IsScalar() {
if f.Options == nil || f.GetOptions().Packed == nil {
return true
}
return f.Options != nil && f.GetOptions().GetPacked()
}
return false
}
func (m *DescriptorProto) HasExtension() bool {
return len(m.ExtensionRange) > 0
}
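
As a worked example of the proto3 helpers above: a repeated int32 field with number 4 and no explicit packed option is treated as packed under proto3, so IsPacked3 reports true, the wire type is forced to 2, GetKey3Uint64 returns 4<<3|2 = 34, and GetKey3 varint-encodes that to the single byte 0x22.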

vendor/github.com/golang/protobuf/ptypes/any/any.pb.go (generated, vendored, new file, 155 lines)
View File

@ -0,0 +1,155 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// DO NOT EDIT!
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 187 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
}
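
For context, Go code normally fills this type through the ptypes helper package rather than by setting TypeUrl and Value by hand. A small sketch, assuming the usual MarshalAny/UnmarshalAny helpers from github.com/golang/protobuf/ptypes:

package anyexample

import (
    "github.com/golang/protobuf/ptypes"
    durpb "github.com/golang/protobuf/ptypes/duration"
)

// roundTrip packs a Duration into an Any and unpacks it again.
func roundTrip() (*durpb.Duration, error) {
    in := &durpb.Duration{Seconds: 1, Nanos: 212000000} // 1.212s
    packed, err := ptypes.MarshalAny(in)                // fills TypeUrl and Value
    if err != nil {
        return nil, err
    }
    out := new(durpb.Duration)
    if err := ptypes.UnmarshalAny(packed, out); err != nil {
        return nil, err
    }
    return out, nil
}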

View File

@ -38,57 +38,105 @@
#include "textflag.h"
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R11
#define INC_Y R9
#define INCx3_Y R12
#define INC_DST R9
#define INCx3_DST R12
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyInc(SB), NOSPLIT, $0
MOVHPD alpha+0(FP), X7
MOVLPD alpha+0(FP), X7
MOVQ x+8(FP), R8
MOVQ y+32(FP), R9
MOVQ n+56(FP), DX
MOVQ incX+64(FP), R11
MOVQ incY+72(FP), R12
MOVQ ix+80(FP), SI
MOVQ iy+88(FP), DI
MOVQ x_base+8(FP), X_PTR // X_PTR = &x
MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y
MOVQ n+56(FP), LEN // LEN = n
CMPQ LEN, $0 // if LEN == 0 { return }
JE end
MOVQ SI, AX // nextX = ix
MOVQ DI, BX // nextY = iy
ADDQ R11, AX // nextX += incX
ADDQ R12, BX // nextY += incY
SHLQ $1, R11 // incX *= 2
SHLQ $1, R12 // incY *= 2
MOVQ ix+80(FP), INC_X
MOVQ iy+88(FP), INC_Y
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix])
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy])
MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer
SUBQ $2, DX // n -= 2
JL tail // if n < 0
MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64)
SHLQ $3, INC_X
MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64)
SHLQ $3, INC_Y
loop: // n >= 0
// y[i] += alpha * x[i] unrolled 2x.
MOVHPD 0(R8)(SI*8), X0
MOVHPD 0(R9)(DI*8), X1
MOVLPD 0(R8)(AX*8), X0
MOVLPD 0(R9)(BX*8), X1
MULPD X7, X0
ADDPD X0, X1
MOVHPD X1, 0(R9)(DI*8)
MOVLPD X1, 0(R9)(BX*8)
MOVSD alpha+0(FP), ALPHA // ALPHA = alpha
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = n % 4
SHRQ $2, LEN // LEN = floor( n / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
ADDQ R11, SI // ix += incX
ADDQ R12, DI // iy += incY
ADDQ R11, AX // nextX += incX
ADDQ R12, BX // nextY += incY
MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
SUBQ $2, DX // n -= 2
JGE loop // if n >= 0 goto loop
loop: // do { // y[i] += alpha * x[i] unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
tail:
ADDQ $2, DX // n += 2
JLE end // if n <= 0
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
// y[i] += alpha * x[i] for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MOVSD 0(R9)(DI*8), X1
MULSD X7, X0
ADDSD X0, X1
MOVSD X1, 0(R9)(DI*8)
ADDSD (Y_PTR), X2 // X_i += y[i]
ADDSD (Y_PTR)(INC_Y*1), X3
ADDSD (Y_PTR)(INC_Y*2), X4
ADDSD (Y_PTR)(INCx3_Y*1), X5
MOVSD X2, (DST_PTR) // y[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
MOVSD X4, (DST_PTR)(INC_DST*2)
MOVSD X5, (DST_PTR)(INCx3_DST*1)
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE end
tail_start: // Reset Loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two:
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
ADDSD (Y_PTR), X2 // X_i += y[i]
ADDSD (Y_PTR)(INC_Y*1), X3
MOVSD X2, (DST_PTR) // y[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2])
ANDQ $1, TAIL
JZ end // if TAIL == 0 { goto end }
tail_one:
// y[i] += alpha * x[i] for the last n % 4 iterations.
MOVSD (X_PTR), X2 // X2 = x[i]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR), X2 // X2 += y[i]
MOVSD X2, (DST_PTR) // y[i] = X2
end:
RET
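
The rewritten kernel is unrolled 4x and steps raw pointers instead of recomputing indices, but the result is unchanged; a plain-Go statement of the semantics it is assumed to implement:

package f64ref

// axpyIncRef mirrors what AxpyInc computes:
// y[iy+i*incY] += alpha * x[ix+i*incX] for i in [0, n).
func axpyIncRef(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) {
    for i := uintptr(0); i < n; i++ {
        y[iy] += alpha * x[ix]
        ix += incX
        iy += incY
    }
}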

View File

@ -38,65 +38,111 @@
#include "textflag.h"
// func DaxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DX
#define IDX AX
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R11
#define INC_Y R9
#define INCx3_Y R12
#define INC_DST R10
#define INCx3_DST R13
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyIncTo(SB), NOSPLIT, $0
MOVQ dst+0(FP), R10
MOVQ incDst+24(FP), R13
MOVQ idst+32(FP), BP
MOVHPD alpha+40(FP), X7
MOVLPD alpha+40(FP), X7
MOVQ x+48(FP), R8
MOVQ y+72(FP), R9
MOVQ n+96(FP), DX
MOVQ incX+104(FP), R11
MOVQ incY+112(FP), R12
MOVQ ix+120(FP), SI
MOVQ iy+128(FP), DI
MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst
MOVQ x_base+48(FP), X_PTR // X_PTR := &x
MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y
MOVQ n+96(FP), LEN // LEN := n
CMPQ LEN, $0 // if LEN == 0 { return }
JE end
MOVQ SI, AX // nextX = ix
MOVQ DI, BX // nextY = iy
MOVQ BP, CX // nextDst = idst
ADDQ R11, AX // nextX += incX
ADDQ R12, BX // nextY += incY
ADDQ R13, CX // nextDst += incDst
SHLQ $1, R11 // incX *= 2
SHLQ $1, R12 // incY *= 2
SHLQ $1, R13 // incDst *= 2
MOVQ ix+120(FP), INC_X
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix])
MOVQ iy+128(FP), INC_Y
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy])
MOVQ idst+32(FP), INC_DST
LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(dst[idst])
SUBQ $2, DX // n -= 2
JL tail // if n < 0
MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64)
SHLQ $3, INC_X
MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64)
SHLQ $3, INC_Y
MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64)
SHLQ $3, INC_DST
MOVSD alpha+40(FP), ALPHA
loop: // n >= 0
// dst[i] = alpha * x[i] + y[i] unrolled 2x.
MOVHPD 0(R8)(SI*8), X0
MOVHPD 0(R9)(DI*8), X1
MOVLPD 0(R8)(AX*8), X0
MOVLPD 0(R9)(BX*8), X1
MULPD X7, X0
ADDPD X0, X1
MOVHPD X1, 0(R10)(BP*8)
MOVLPD X1, 0(R10)(CX*8)
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = n % 4
SHRQ $2, LEN // LEN = floor( n / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
ADDQ R11, SI // ix += incX
ADDQ R12, DI // iy += incY
ADDQ R13, BP // idst += incDst
ADDQ R11, AX // nextX += incX
ADDQ R12, BX // nextY += incY
ADDQ R13, CX // nextDst += incDst
MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3
SUBQ $2, DX // n -= 2
JGE loop // if n >= 0 goto loop
loop: // do { // dst[i] = alpha * x[i] + y[i] unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
tail:
ADDQ $2, DX // n += 2
JLE end // if n <= 0
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
// dst[i] = alpha * x[i] + y[i] for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MOVSD 0(R9)(DI*8), X1
MULSD X7, X0
ADDSD X0, X1
MOVSD X1, 0(R10)(BP*8)
ADDSD (Y_PTR), X2 // X_i += y[i]
ADDSD (Y_PTR)(INC_Y*1), X3
ADDSD (Y_PTR)(INC_Y*2), X4
ADDSD (Y_PTR)(INCx3_Y*1), X5
MOVSD X2, (DST_PTR) // y[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
MOVSD X4, (DST_PTR)(INC_DST*2)
MOVSD X5, (DST_PTR)(INCx3_DST*1)
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE end
tail_start: // Reset Loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two:
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
ADDSD (Y_PTR), X2 // X_i += y[i]
ADDSD (Y_PTR)(INC_Y*1), X3
MOVSD X2, (DST_PTR) // y[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2])
LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2])
ANDQ $1, TAIL
JZ end // if TAIL == 0 { goto end }
tail_one:
MOVSD (X_PTR), X2 // X2 = x[i]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR), X2 // X2 += y[i]
MOVSD X2, (DST_PTR) // y[i] = X2
end:
RET

View File

@ -38,41 +38,97 @@
#include "textflag.h"
// func DaxpyUnitary(alpha float64, x, y []float64)
// This function assumes len(y) >= len(x).
#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyUnitary(alpha float64, x, y []float64)
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
MOVHPD alpha+0(FP), X7
MOVLPD alpha+0(FP), X7
MOVQ x+8(FP), R8
MOVQ x_len+16(FP), DI // n = len(x)
MOVQ y+32(FP), R9
MOVQ x_base+8(FP), X_PTR // X_PTR := &x
MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y
MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) )
CMPQ y_len+40(FP), LEN
CMOVQLE y_len+40(FP), LEN
CMPQ LEN, $0 // if LEN == 0 { return }
JE end
XORQ IDX, IDX
MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha }
SHUFPD $0, ALPHA, ALPHA
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining
MOVQ Y_PTR, TAIL // Check memory alignment
ANDQ $15, TAIL // TAIL = &y % 16
JZ no_trim // if TAIL == 0 { goto no_trim }
MOVQ $0, SI // i = 0
SUBQ $2, DI // n -= 2
JL tail // if n < 0 goto tail
// Align on 16-byte boundary
MOVSD (X_PTR), X2 // X2 := x[0]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR), X2 // X2 += y[0]
MOVSD X2, (DST_PTR) // y[0] = X2
INCQ IDX // i++
DECQ LEN // LEN--
JZ end // if LEN == 0 { return }
loop:
// y[i] += alpha * x[i] unrolled 2x.
MOVUPD 0(R8)(SI*8), X0
MOVUPD 0(R9)(SI*8), X1
MULPD X7, X0
ADDPD X0, X1
MOVUPD X1, 0(R9)(SI*8)
no_trim:
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL := n % 8
SHRQ $3, LEN // LEN = floor( n / 8 )
JZ tail_start // if LEN == 0 { goto tail_start }
ADDQ $2, SI // i += 2
SUBQ $2, DI // n -= 2
JGE loop // if n >= 0 goto loop
loop: // do {
// y[i] += alpha * x[i] unrolled 8x.
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MOVUPS 16(X_PTR)(IDX*8), X3
MOVUPS 32(X_PTR)(IDX*8), X4
MOVUPS 48(X_PTR)(IDX*8), X5
tail:
ADDQ $2, DI // n += 2
JLE end // if n <= 0 goto end
MULPD ALPHA, X2 // X_i *= a
MULPD ALPHA_2, X3
MULPD ALPHA, X4
MULPD ALPHA_2, X5
// y[i] += alpha * x[i] for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MOVSD 0(R9)(SI*8), X1
MULSD X7, X0
ADDSD X0, X1
MOVSD X1, 0(R9)(SI*8)
ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i]
ADDPD 16(Y_PTR)(IDX*8), X3
ADDPD 32(Y_PTR)(IDX*8), X4
ADDPD 48(Y_PTR)(IDX*8), X5
MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i
MOVUPS X3, 16(DST_PTR)(IDX*8)
MOVUPS X4, 32(DST_PTR)(IDX*8)
MOVUPS X5, 48(DST_PTR)(IDX*8)
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE end
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( TAIL / 2 )
JZ tail_one // if LEN == 0 { goto tail_one }
tail_two: // do {
MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i]
MULPD ALPHA, X2 // X2 *= a
ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i]
MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2
ADDQ $2, IDX // i += 2
DECQ LEN
JNZ tail_two // } while --LEN > 0
ANDQ $1, TAIL
JZ end // if TAIL == 0 { goto end }
tail_one:
MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i]
MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2
end:
RET

View File

@ -38,42 +38,103 @@
#include "textflag.h"
// func DaxpyUnitaryTo(dst []float64, alpha float64, x, y []float64)
// This function assumes len(y) >= len(x) and len(dst) >= len(x).
#define X_PTR SI
#define Y_PTR DX
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64)
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
MOVQ dst+0(FP), R10
MOVHPD alpha+24(FP), X7
MOVLPD alpha+24(FP), X7
MOVQ x+32(FP), R8
MOVQ x_len+40(FP), DI // n = len(x)
MOVQ y+56(FP), R9
MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst
MOVQ x_base+32(FP), X_PTR // X_PTR := &x
MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y
MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) )
CMPQ y_len+64(FP), LEN
CMOVQLE y_len+64(FP), LEN
CMPQ dst_len+8(FP), LEN
CMOVQLE dst_len+8(FP), LEN
MOVQ $0, SI // i = 0
SUBQ $2, DI // n -= 2
JL tail // if n < 0 goto tail
CMPQ LEN, $0
JE end // if LEN == 0 { return }
loop:
// dst[i] = alpha * x[i] + y[i] unrolled 2x.
MOVUPD 0(R8)(SI*8), X0
MOVUPD 0(R9)(SI*8), X1
MULPD X7, X0
ADDPD X0, X1
MOVUPD X1, 0(R10)(SI*8)
XORQ IDX, IDX // IDX = 0
MOVSD alpha+24(FP), ALPHA
SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha }
MOVQ Y_PTR, TAIL // Check memory alignment
ANDQ $15, TAIL // TAIL = &y % 16
JZ no_trim // if TAIL == 0 { goto no_trim }
ADDQ $2, SI // i += 2
SUBQ $2, DI // n -= 2
JGE loop // if n >= 0 goto loop
// Align on 16-byte boundary
MOVSD (X_PTR), X2 // X2 := x[0]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR), X2 // X2 += y[0]
MOVSD X2, (DST_PTR) // y[0] = X2
INCQ IDX // i++
DECQ LEN // LEN--
JZ end // if LEN == 0 { return }
tail:
ADDQ $2, DI // n += 2
JLE end // if n <= 0 goto end
no_trim:
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL := n % 8
SHRQ $3, LEN // LEN = floor( n / 8 )
JZ tail_start // if LEN == 0 { goto tail_start }
// dst[i] = alpha * x[i] + y[i] for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MOVSD 0(R9)(SI*8), X1
MULSD X7, X0
ADDSD X0, X1
MOVSD X1, 0(R10)(SI*8)
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining
loop: // do {
// y[i] += alpha * x[i] unrolled 8x.
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MOVUPS 16(X_PTR)(IDX*8), X3
MOVUPS 32(X_PTR)(IDX*8), X4
MOVUPS 48(X_PTR)(IDX*8), X5
MULPD ALPHA, X2 // X_i *= alpha
MULPD ALPHA_2, X3
MULPD ALPHA, X4
MULPD ALPHA_2, X5
ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i]
ADDPD 16(Y_PTR)(IDX*8), X3
ADDPD 32(Y_PTR)(IDX*8), X4
ADDPD 48(Y_PTR)(IDX*8), X5
MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i
MOVUPS X3, 16(DST_PTR)(IDX*8)
MOVUPS X4, 32(DST_PTR)(IDX*8)
MOVUPS X5, 48(DST_PTR)(IDX*8)
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE end
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( TAIL / 2 )
JZ tail_one // if LEN == 0 { goto tail_one }
tail_two: // do {
MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i]
MULPD ALPHA, X2 // X2 *= alpha
ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i]
MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2
ADDQ $2, IDX // i += 2
DECQ LEN
JNZ tail_two // } while --LEN > 0
ANDQ $1, TAIL
JZ end // if TAIL == 0 { goto end }
tail_one:
MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i]
MULSD ALPHA, X2 // X2 *= a
ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i]
MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2
end:
RET

View File

@ -38,43 +38,76 @@
#include "textflag.h"
// func DscalInc(alpha float64, x []float64, n, incX uintptr)
#define X_PTR SI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R9
#define ALPHA X0
#define ALPHA_2 X1
// func ScalInc(alpha float64, x []float64, n, incX uintptr)
TEXT ·ScalInc(SB), NOSPLIT, $0
MOVHPD alpha+0(FP), X7
MOVLPD alpha+0(FP), X7
MOVQ x+8(FP), R8
MOVQ n+32(FP), DX
MOVQ incX+40(FP), R10
MOVSD alpha+0(FP), ALPHA // ALPHA = alpha
MOVQ x_base+8(FP), X_PTR // X_PTR = &x
MOVQ incX+40(FP), INC_X // INC_X = incX
SHLQ $3, INC_X // INC_X *= sizeof(float64)
MOVQ n+32(FP), LEN // LEN = n
CMPQ LEN, $0
JE end // if LEN == 0 { return }
MOVQ $0, SI
MOVQ R10, AX // nextX = incX
SHLQ $1, R10 // incX *= 2
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
SUBQ $2, DX // n -= 2
JL tail // if n < 0
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
loop:
// x[i] *= alpha unrolled 2x.
MOVHPD 0(R8)(SI*8), X0
MOVLPD 0(R8)(AX*8), X0
MULPD X7, X0
MOVHPD X0, 0(R8)(SI*8)
MOVLPD X0, 0(R8)(AX*8)
loop: // do { // x[i] *= alpha unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
ADDQ R10, SI // ix += incX
ADDQ R10, AX // nextX += incX
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
SUBQ $2, DX // n -= 2
JGE loop // if n >= 0 goto loop
MOVSD X2, (X_PTR) // x[i] = X_i
MOVSD X3, (X_PTR)(INC_X*1)
MOVSD X4, (X_PTR)(INC_X*2)
MOVSD X5, (X_PTR)(INCx3_X*1)
tail:
ADDQ $2, DX // n += 2
JLE end // if n <= 0
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
// x[i] *= alpha for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MULSD X7, X0
MOVSD X0, 0(R8)(SI*8)
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two: // do {
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
MOVSD X2, (X_PTR) // x[i] = X_i
MOVSD X3, (X_PTR)(INC_X*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
ANDQ $1, TAIL
JZ end
tail_one:
MOVSD (X_PTR), X2 // X_i = x[i]
MULSD ALPHA, X2 // X_i *= ALPHA
MOVSD X2, (X_PTR) // x[i] = X_i
end:
RET
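
As with the axpy kernels, the new ScalInc body is an unrolled, pointer-stepping version of a simple strided scale; the assumed reference semantics in plain Go:

package f64ref

// scalIncRef mirrors what ScalInc computes: x[i*incX] *= alpha for i in [0, n).
func scalIncRef(alpha float64, x []float64, n, incX uintptr) {
    var ix uintptr
    for i := uintptr(0); i < n; i++ {
        x[ix] *= alpha
        ix += incX
    }
}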

View File

@ -38,50 +38,85 @@
#include "textflag.h"
// func DscalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr)
#define X_PTR SI
#define DST_PTR DI
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R9
#define INC_DST R10
#define INCx3_DST R11
#define ALPHA X0
#define ALPHA_2 X1
// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr)
TEXT ·ScalIncTo(SB), NOSPLIT, $0
MOVQ dst+0(FP), R9
MOVQ incDst+24(FP), R11
MOVHPD alpha+32(FP), X7
MOVLPD alpha+32(FP), X7
MOVQ x+40(FP), R8
MOVQ n+64(FP), DX
MOVQ incX+72(FP), R10
MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst
MOVQ incDst+24(FP), INC_DST // INC_DST = incDst
SHLQ $3, INC_DST // INC_DST *= sizeof(float64)
MOVSD alpha+32(FP), ALPHA // ALPHA = alpha
MOVQ x_base+40(FP), X_PTR // X_PTR = &x
MOVQ n+64(FP), LEN // LEN = n
MOVQ incX+72(FP), INC_X // INC_X = incX
SHLQ $3, INC_X // INC_X *= sizeof(float64)
CMPQ LEN, $0
JE end // if LEN == 0 { return }
MOVQ $0, SI
MOVQ $0, DI
MOVQ R10, AX // nextX = incX
MOVQ R11, BX // nextDst = incDst
SHLQ $1, R10 // incX *= 2
SHLQ $1, R11 // incDst *= 2
MOVQ LEN, TAIL
ANDQ $3, TAIL // TAIL = LEN % 4
SHRQ $2, LEN // LEN = floor( LEN / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
SUBQ $2, DX // n -= 2
JL tail // if n < 0
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3
loop:
// dst[i] = alpha * x[i] unrolled 2x.
MOVHPD 0(R8)(SI*8), X0
MOVLPD 0(R8)(AX*8), X0
MULPD X7, X0
MOVHPD X0, 0(R9)(DI*8)
MOVLPD X0, 0(R9)(BX*8)
loop: // do { // x[i] *= alpha unrolled 4x.
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MOVSD (X_PTR)(INC_X*2), X4
MOVSD (X_PTR)(INCx3_X*1), X5
ADDQ R10, SI // ix += incX
ADDQ R10, AX // nextX += incX
ADDQ R11, DI // idst += incDst
ADDQ R11, BX // nextDst += incDst
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA_2, X3
MULSD ALPHA, X4
MULSD ALPHA_2, X5
SUBQ $2, DX // n -= 2
JGE loop // if n >= 0 goto loop
MOVSD X2, (DST_PTR) // dst[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
MOVSD X4, (DST_PTR)(INC_DST*2)
MOVSD X5, (DST_PTR)(INCx3_DST*1)
tail:
ADDQ $2, DX // n += 2
JLE end // if n <= 0
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
// dst[i] = alpha * x[i] for the last iteration if n is odd.
MOVSD 0(R8)(SI*8), X0
MULSD X7, X0
MOVSD X0, 0(R9)(DI*8)
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( LEN / 2 )
JZ tail_one
tail_two:
MOVSD (X_PTR), X2 // X_i = x[i]
MOVSD (X_PTR)(INC_X*1), X3
MULSD ALPHA, X2 // X_i *= a
MULSD ALPHA, X3
MOVSD X2, (DST_PTR) // dst[i] = X_i
MOVSD X3, (DST_PTR)(INC_DST*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2])
ANDQ $1, TAIL
JZ end
tail_one:
MOVSD (X_PTR), X2 // X_i = x[i]
MULSD ALPHA, X2 // X_i *= ALPHA
MOVSD X2, (DST_PTR) // dst[i] = X_i
end:
RET

View File

@ -38,43 +38,75 @@
#include "textflag.h"
// func DscalUnitary(alpha float64, x []float64)
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP]
#define X_PTR SI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func ScalUnitary(alpha float64, x []float64)
TEXT ·ScalUnitary(SB), NOSPLIT, $0
MOVHPD alpha+0(FP), X7
MOVLPD alpha+0(FP), X7
MOVQ x+8(FP), R8
MOVQ x_len+16(FP), DI // n = len(x)
MOVDDUP_ALPHA // ALPHA = { alpha, alpha }
MOVQ x_base+8(FP), X_PTR // X_PTR = &x
MOVQ x_len+16(FP), LEN // LEN = len(x)
CMPQ LEN, $0
JE end // if LEN == 0 { return }
XORQ IDX, IDX // IDX = 0
MOVQ $0, SI // i = 0
SUBQ $4, DI // n -= 4
JL tail // if n < 0 goto tail
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ tail_start // if LEN == 0 { goto tail_start }
loop:
// x[i] *= alpha unrolled 4x.
MOVUPD 0(R8)(SI*8), X0
MOVUPD 16(R8)(SI*8), X1
MULPD X7, X0
MULPD X7, X1
MOVUPD X0, 0(R8)(SI*8)
MOVUPD X1, 16(R8)(SI*8)
MOVUPS ALPHA, ALPHA_2
ADDQ $4, SI // i += 4
SUBQ $4, DI // n -= 4
JGE loop // if n >= 0 goto loop
loop: // do { // x[i] *= alpha unrolled 8x.
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MOVUPS 16(X_PTR)(IDX*8), X3
MOVUPS 32(X_PTR)(IDX*8), X4
MOVUPS 48(X_PTR)(IDX*8), X5
tail:
ADDQ $4, DI // n += 4
JZ end // if n == 0 goto end
MULPD ALPHA, X2 // X_i *= ALPHA
MULPD ALPHA_2, X3
MULPD ALPHA, X4
MULPD ALPHA_2, X5
onemore:
// x[i] *= alpha for the remaining 1-3 elements.
MOVSD 0(R8)(SI*8), X0
MULSD X7, X0
MOVSD X0, 0(R8)(SI*8)
MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i
MOVUPS X3, 16(X_PTR)(IDX*8)
MOVUPS X4, 32(X_PTR)(IDX*8)
MOVUPS X5, 48(X_PTR)(IDX*8)
ADDQ $1, SI // i++
SUBQ $1, DI // n--
JNZ onemore // if n != 0 goto onemore
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ loop // while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
tail_start: // Reset loop registers
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( TAIL / 2 )
JZ tail_one // if LEN == 0 { goto tail_one }
tail_two: // do {
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MULPD ALPHA, X2 // X_i *= ALPHA
MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i
ADDQ $2, IDX // i += 2
DECQ LEN
JNZ tail_two // while --LEN > 0
ANDQ $1, TAIL
JZ end // if TAIL == 0 { return }
tail_one:
// x[i] *= alpha for the remaining element.
MOVSD (X_PTR)(IDX*8), X2
MULSD ALPHA, X2
MOVSD X2, (X_PTR)(IDX*8)
end:
RET

View File

@ -38,45 +38,76 @@
#include "textflag.h"
// func DscalUnitaryTo(dst []float64, alpha float64, x []float64)
#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/
#define X_PTR SI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1
// func ScalUnitaryTo(dst []float64, alpha float64, x []float64)
// This function assumes len(dst) >= len(x).
TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0
MOVQ dst+0(FP), R9
MOVHPD alpha+24(FP), X7
MOVLPD alpha+24(FP), X7
MOVQ x+32(FP), R8
MOVQ x_len+40(FP), DI // n = len(x)
MOVQ x_base+32(FP), X_PTR // X_PTR = &x
MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst
MOVDDUP_ALPHA // ALPHA = { alpha, alpha }
MOVQ x_len+40(FP), LEN // LEN = len(x)
CMPQ LEN, $0
JE end // if LEN == 0 { return }
MOVQ $0, SI // i = 0
SUBQ $4, DI // n -= 4
JL tail // if n < 0 goto tail
XORQ IDX, IDX // IDX = 0
MOVQ LEN, TAIL
ANDQ $7, TAIL // TAIL = LEN % 8
SHRQ $3, LEN // LEN = floor( LEN / 8 )
JZ tail_start // if LEN == 0 { goto tail_start }
loop:
// dst[i] = alpha * x[i] unrolled 4x.
MOVUPD 0(R8)(SI*8), X0
MOVUPD 16(R8)(SI*8), X1
MULPD X7, X0
MULPD X7, X1
MOVUPD X0, 0(R9)(SI*8)
MOVUPD X1, 16(R9)(SI*8)
MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining
ADDQ $4, SI // i += 4
SUBQ $4, DI // n -= 4
JGE loop // if n >= 0 goto loop
loop: // do { // dst[i] = alpha * x[i] unrolled 8x.
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MOVUPS 16(X_PTR)(IDX*8), X3
MOVUPS 32(X_PTR)(IDX*8), X4
MOVUPS 48(X_PTR)(IDX*8), X5
tail:
ADDQ $4, DI // n += 4
JZ end // if n == 0 goto end
MULPD ALPHA, X2 // X_i *= ALPHA
MULPD ALPHA_2, X3
MULPD ALPHA, X4
MULPD ALPHA_2, X5
onemore:
// dst[i] = alpha * x[i] for the remaining 1-3 elements.
MOVSD 0(R8)(SI*8), X0
MULSD X7, X0
MOVSD X0, 0(R9)(SI*8)
MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i
MOVUPS X3, 16(DST_PTR)(IDX*8)
MOVUPS X4, 32(DST_PTR)(IDX*8)
MOVUPS X5, 48(DST_PTR)(IDX*8)
ADDQ $1, SI // i++
SUBQ $1, DI // n--
JNZ onemore // if n != 0 goto onemore
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ loop // while --LEN > 0
CMPQ TAIL, $0
JE end // if TAIL == 0 { return }
tail_start: // Reset loop counters
MOVQ TAIL, LEN // Loop counter: LEN = TAIL
SHRQ $1, LEN // LEN = floor( TAIL / 2 )
JZ tail_one // if LEN == 0 { goto tail_one }
tail_two: // do {
MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
MULPD ALPHA, X2 // X_i *= ALPHA
MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i
ADDQ $2, IDX // i += 2
DECQ LEN
JNZ tail_two // while --LEN > 0
ANDQ $1, TAIL
JZ end // if TAIL == 0 { return }
tail_one:
MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i]
MULSD ALPHA, X2 // X_i *= ALPHA
MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i
end:
RET

View File

@ -164,6 +164,13 @@ func UseImageWithContext(img draw.Image, gc draw2d.GraphicContext) option {
}
}
// Image returns the image the canvas is drawing to.
//
// The dimensions of the returned image must not be modified.
func (c *Canvas) Image() draw.Image {
return c.img
}
func (c *Canvas) Size() (w, h vg.Length) {
return c.w, c.h
}

View File

@ -35,6 +35,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// CallOption is an option used by Invoke to control behaviors of RPC calls.
@ -80,7 +81,11 @@ type boRetryer struct {
}
func (r *boRetryer) Retry(err error) (time.Duration, bool) {
c := grpc.Code(err)
st, ok := status.FromError(err)
if !ok {
return 0, false
}
c := st.Code()
for _, rc := range r.codes {
if c == rc {
return r.backoff.Pause(), true
@ -121,6 +126,9 @@ func (bo *Backoff) Pause() time.Duration {
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
// Select a duration between zero and the current max. It might seem counterintuitive to
// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
// argues that that is the best strategy.
d := time.Duration(rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {

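The comment added above describes full-jitter exponential backoff: each pause is drawn uniformly from [0, cur) and cur then grows by Multiplier, capped at Max. A standalone sketch of that policy (not the gax API itself; names are illustrative):

package backoffexample

import (
    "math/rand"
    "time"
)

// fullJitter returns the next pause and the grown ceiling. cur must be > 0.
func fullJitter(cur, max time.Duration, multiplier float64) (pause, next time.Duration) {
    pause = time.Duration(rand.Int63n(int64(cur))) // uniform in [0, cur)
    next = time.Duration(float64(cur) * multiplier)
    if next > max {
        next = max
    }
    return pause, next
}
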
View File

@ -67,17 +67,19 @@ type AgentCheckRegistration struct {
// AgentServiceCheck is used to define a node or service level check
type AgentServiceCheck struct {
Script string `json:",omitempty"`
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"` // Only supported for Docker.
Interval string `json:",omitempty"`
Timeout string `json:",omitempty"`
TTL string `json:",omitempty"`
HTTP string `json:",omitempty"`
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
Script string `json:",omitempty"`
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"` // Only supported for Docker.
Interval string `json:",omitempty"`
Timeout string `json:",omitempty"`
TTL string `json:",omitempty"`
HTTP string `json:",omitempty"`
Header map[string][]string `json:",omitempty"`
Method string `json:",omitempty"`
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
// In Consul 0.7 and later, checks that are associated with a service
// may also contain this optional DeregisterCriticalServiceAfter field,

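With the new Header and Method fields, an HTTP check can customize the request it issues. A hedged registration sketch against the surrounding api package (service name, URL, and header value are illustrative):

package registerexample

import "github.com/hashicorp/consul/api"

// registerWebService registers a service with an HTTP health check that uses
// the Method and Header fields added in this sync.
func registerWebService() error {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        return err
    }
    reg := &api.AgentServiceRegistration{
        Name: "web",
        Port: 8080,
        Check: &api.AgentServiceCheck{
            HTTP:     "http://127.0.0.1:8080/health",
            Method:   "POST",                                     // new field in this sync
            Header:   map[string][]string{"X-Token": {"secret"}}, // new field in this sync
            Interval: "10s",
            Timeout:  "2s",
        },
    }
    return client.Agent().ServiceRegister(reg)
}
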
View File

@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@ -369,10 +370,6 @@ func NewClient(config *Config) (*Client, error) {
config.Transport = defConfig.Transport
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.TLSConfig.Address == "" {
config.TLSConfig.Address = defConfig.TLSConfig.Address
}
@ -434,17 +431,20 @@ func NewClient(config *Config) (*Client, error) {
// NewHttpClient returns an http client configured with the given Transport and TLS
// config.
func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
tlsClientConfig, err := SetupTLSConfig(&tlsConf)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsClientConfig
client := &http.Client{
Transport: transport,
}
if transport.TLSClientConfig == nil {
tlsClientConfig, err := SetupTLSConfig(&tlsConf)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsClientConfig
}
return client, nil
}
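
The rewritten NewHttpClient only builds a TLS configuration when the transport does not already carry one, so a caller-supplied tls.Config is now respected. A minimal sketch of relying on that behaviour (assuming Config still exposes an HttpClient field, as elsewhere in this package):

package consulclientexample

import (
    "crypto/tls"
    "net/http"

    "github.com/hashicorp/consul/api"
)

// newClientWithTLS builds a Consul client whose transport already carries a
// tls.Config; NewHttpClient leaves that config untouched.
func newClientWithTLS(tlsCfg *tls.Config) (*api.Client, error) {
    transport := &http.Transport{TLSClientConfig: tlsCfg}
    httpClient, err := api.NewHttpClient(transport, api.TLSConfig{})
    if err != nil {
        return nil, err
    }
    cfg := api.DefaultConfig()
    cfg.HttpClient = httpClient
    return api.NewClient(cfg)
}
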
@ -649,6 +649,8 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
return nil, err
}
return wm, nil
}

View File

@ -34,10 +34,20 @@ type Client struct {
// value to determine how many samples we keep, per node.
latencyFilterSamples map[string][]float64
// stats is used to record events that occur when updating coordinates.
stats ClientStats
// mutex enables safe concurrent access to the client.
mutex sync.RWMutex
}
// ClientStats is used to record events that occur when updating coordinates.
type ClientStats struct {
// Resets is incremented any time we reset our local coordinate because
// our calculations have resulted in an invalid state.
Resets int
}
// NewClient creates a new Client and verifies the configuration is valid.
func NewClient(config *Config) (*Client, error) {
if !(config.Dimensionality > 0) {
@ -63,11 +73,16 @@ func (c *Client) GetCoordinate() *Coordinate {
}
// SetCoordinate forces the client's coordinate to a known state.
func (c *Client) SetCoordinate(coord *Coordinate) {
func (c *Client) SetCoordinate(coord *Coordinate) error {
c.mutex.Lock()
defer c.mutex.Unlock()
if err := c.checkCoordinate(coord); err != nil {
return err
}
c.coord = coord.Clone()
return nil
}
// ForgetNode removes any client state for the given node.
@ -78,6 +93,29 @@ func (c *Client) ForgetNode(node string) {
delete(c.latencyFilterSamples, node)
}
// Stats returns a copy of stats for the client.
func (c *Client) Stats() ClientStats {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.stats
}
// checkCoordinate returns an error if the coordinate isn't compatible with
// this client, or if the coordinate itself isn't valid. This assumes the mutex
// has been locked already.
func (c *Client) checkCoordinate(coord *Coordinate) error {
if !c.coord.IsCompatibleWith(coord) {
return fmt.Errorf("dimensions aren't compatible")
}
if !coord.IsValid() {
return fmt.Errorf("coordinate is invalid")
}
return nil
}
// latencyFilter applies a simple moving median filter with a new sample for
// a node. This assumes that the mutex has been locked already.
func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
@ -159,15 +197,24 @@ func (c *Client) updateGravity() {
// Update takes other, a coordinate for another node, and rtt, a round trip
// time observation for a ping to that node, and updates the estimated position of
// the client's coordinate. Returns the updated coordinate.
func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
if err := c.checkCoordinate(other); err != nil {
return nil, err
}
rttSeconds := c.latencyFilter(node, rtt.Seconds())
c.updateVivaldi(other, rttSeconds)
c.updateAdjustment(other, rttSeconds)
c.updateGravity()
return c.coord.Clone()
if !c.coord.IsValid() {
c.stats.Resets++
c.coord = NewCoordinate(c.config)
}
return c.coord.Clone(), nil
}
// DistanceTo returns the estimated RTT from the client's coordinate to other, the

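Update and SetCoordinate now validate their inputs and return errors instead of silently absorbing NaN or Inf values, and the client resets itself (counted in Stats) if its own coordinate ever becomes invalid. A hedged sketch of the updated call pattern (import path assumed):

package coordexample

import (
    "log"
    "time"

    "github.com/hashicorp/serf/coordinate"
)

// observe feeds one RTT observation for a peer into the client and handles
// the error return added in this sync.
func observe(c *coordinate.Client, peer string, peerCoord *coordinate.Coordinate, rtt time.Duration) {
    updated, err := c.Update(peer, peerCoord, rtt)
    if err != nil {
        log.Printf("rejected coordinate from %s: %v", peer, err)
        return
    }
    log.Printf("estimated RTT to %s: %v (resets so far: %d)",
        peer, updated.DistanceTo(peerCoord), c.Stats().Resets)
}
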
View File

@ -72,6 +72,26 @@ func (c *Coordinate) Clone() *Coordinate {
}
}
// componentIsValid returns false if a floating point value is a NaN or an
// infinity.
func componentIsValid(f float64) bool {
return !math.IsInf(f, 0) && !math.IsNaN(f)
}
// IsValid returns false if any component of a coordinate isn't valid, per the
// componentIsValid() helper above.
func (c *Coordinate) IsValid() bool {
for i := range c.Vec {
if !componentIsValid(c.Vec[i]) {
return false
}
}
return componentIsValid(c.Error) &&
componentIsValid(c.Adjustment) &&
componentIsValid(c.Height)
}
// IsCompatibleWith checks to see if the two coordinates are compatible
// dimensionally. If this returns true then you are guaranteed to not get
// any runtime errors operating on them.
@ -122,7 +142,7 @@ func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
// already been checked to be compatible.
func add(vec1 []float64, vec2 []float64) []float64 {
ret := make([]float64, len(vec1))
for i, _ := range ret {
for i := range ret {
ret[i] = vec1[i] + vec2[i]
}
return ret
@ -132,7 +152,7 @@ func add(vec1 []float64, vec2 []float64) []float64 {
// dimensions have already been checked to be compatible.
func diff(vec1 []float64, vec2 []float64) []float64 {
ret := make([]float64, len(vec1))
for i, _ := range ret {
for i := range ret {
ret[i] = vec1[i] - vec2[i]
}
return ret
@ -141,7 +161,7 @@ func diff(vec1 []float64, vec2 []float64) []float64 {
// mul returns vec multiplied by a scalar factor.
func mul(vec []float64, factor float64) []float64 {
ret := make([]float64, len(vec))
for i, _ := range vec {
for i := range vec {
ret[i] = vec[i] * factor
}
return ret
@ -150,7 +170,7 @@ func mul(vec []float64, factor float64) []float64 {
// magnitude computes the magnitude of the vec.
func magnitude(vec []float64) float64 {
sum := 0.0
for i, _ := range vec {
for i := range vec {
sum += vec[i] * vec[i]
}
return math.Sqrt(sum)
@ -168,7 +188,7 @@ func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
}
// Otherwise, just return a random unit vector.
for i, _ := range ret {
for i := range ret {
ret[i] = rand.Float64() - 0.5
}
if mag := magnitude(ret); mag > zeroThreshold {

vendor/github.com/kr/pty/ioctl.go (generated, vendored, 2 lines)
View File

@ -1,3 +1,5 @@
// +build !windows
package pty
import "syscall"

vendor/github.com/kr/pty/pty_dragonfly.go (generated, vendored, new file, 76 lines)
View File

@ -0,0 +1,76 @@
package pty
import (
"errors"
"os"
"strings"
"syscall"
"unsafe"
)
// same code as pty_darwin.go
func open() (pty, tty *os.File, err error) {
p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
if err != nil {
return nil, nil, err
}
sname, err := ptsname(p)
if err != nil {
return nil, nil, err
}
err = grantpt(p)
if err != nil {
return nil, nil, err
}
err = unlockpt(p)
if err != nil {
return nil, nil, err
}
t, err := os.OpenFile(sname, os.O_RDWR, 0)
if err != nil {
return nil, nil, err
}
return p, t, nil
}
func grantpt(f *os.File) error {
_, err := isptmaster(f.Fd())
return err
}
func unlockpt(f *os.File) error {
_, err := isptmaster(f.Fd())
return err
}
func isptmaster(fd uintptr) (bool, error) {
err := ioctl(fd, syscall.TIOCISPTMASTER, 0)
return err == nil, err
}
var (
emptyFiodgnameArg fiodgnameArg
ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg))
)
func ptsname(f *os.File) (string, error) {
name := make([]byte, _C_SPECNAMELEN)
fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}}
err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa)))
if err != nil {
return "", err
}
for i, c := range name {
if c == 0 {
s := "/dev/" + string(name[:i])
return strings.Replace(s, "ptm", "pts", -1), nil
}
}
return "", errors.New("TIOCPTYGNAME string not NUL-terminated")
}
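
The new DragonFly BSD support does not change the package's public surface; a typical (hedged) use of kr/pty still looks like:

package ptyexample

import (
    "io"
    "os"
    "os/exec"

    "github.com/kr/pty"
)

// runInPty starts a command attached to a new pseudo-terminal and streams
// its output to stdout.
func runInPty() error {
    cmd := exec.Command("ls", "-l")
    f, err := pty.Start(cmd) // master side of the pty
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(os.Stdout, f) // copy until the pty closes
    return err
}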

View File

@ -1,4 +1,4 @@
// +build !linux,!darwin,!freebsd
// +build !linux,!darwin,!freebsd,!dragonfly
package pty

vendor/github.com/kr/pty/run.go (generated, vendored, 2 lines)
View File

@ -1,3 +1,5 @@
// +build !windows
package pty
import (

vendor/github.com/kr/pty/types_dragonfly.go (generated, vendored, new file, 17 lines)
View File

@ -0,0 +1,17 @@
// +build ignore
package pty
/*
#define _KERNEL
#include <sys/conf.h>
#include <sys/param.h>
#include <sys/filio.h>
*/
import "C"
const (
_C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */
)
type fiodgnameArg C.struct_fiodname_args

vendor/github.com/kr/pty/util.go (generated, vendored, 2 lines)
View File

@ -1,3 +1,5 @@
// +build !windows
package pty
import (

vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go generated vendored Normal file

@ -0,0 +1,14 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types_dragonfly.go
package pty
const (
_C_SPECNAMELEN = 0x3f
)
type fiodgnameArg struct {
Name *byte
Len uint32
Pad_cgo_0 [4]byte
}

vendor/github.com/kr/pty/ztypes_mipsx.go generated vendored Normal file

@ -0,0 +1,12 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types.go
// +build linux
// +build mips mipsle mips64 mips64le
package pty
type (
_C_int int32
_C_uint uint32
)

vendor/github.com/spf13/pflag/bool_slice.go generated vendored Normal file

@ -0,0 +1,147 @@
package pflag
import (
"io"
"strconv"
"strings"
)
// -- boolSlice Value
type boolSliceValue struct {
value *[]bool
changed bool
}
func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
bsv := new(boolSliceValue)
bsv.value = p
*bsv.value = val
return bsv
}
// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
func (s *boolSliceValue) Set(val string) error {
// remove all quote characters
rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
// read flag arguments with CSV parser
boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
if err != nil && err != io.EOF {
return err
}
// parse boolean values into slice
out := make([]bool, 0, len(boolStrSlice))
for _, boolStr := range boolStrSlice {
b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
if err != nil {
return err
}
out = append(out, b)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
// Type returns a string that uniquely represents this flag's type.
func (s *boolSliceValue) Type() string {
return "boolSlice"
}
// String defines a "native" format for this boolean slice flag value.
func (s *boolSliceValue) String() string {
boolStrSlice := make([]string, len(*s.value))
for i, b := range *s.value {
boolStrSlice[i] = strconv.FormatBool(b)
}
out, _ := writeAsCSV(boolStrSlice)
return "[" + out + "]"
}
func boolSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []bool{}, nil
}
ss := strings.Split(val, ",")
out := make([]bool, len(ss))
for i, t := range ss {
var err error
out[i], err = strconv.ParseBool(t)
if err != nil {
return nil, err
}
}
return out, nil
}
// GetBoolSlice returns the []bool value of a flag with the given name.
func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
if err != nil {
return []bool{}, err
}
return val.([]bool), nil
}
// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
// The argument p points to a []bool variable in which to store the value of the flag.
func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
f.VarP(newBoolSliceValue(value, p), name, "", usage)
}
// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
}
// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
// The argument p points to a []bool variable in which to store the value of the flag.
func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
}
// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
}
// BoolSlice defines a []bool flag with specified name, default value, and usage string.
// The return value is the address of a []bool variable that stores the value of the flag.
func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
p := []bool{}
f.BoolSliceVarP(&p, name, "", value, usage)
return &p
}
// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
p := []bool{}
f.BoolSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// BoolSlice defines a []bool flag with specified name, default value, and usage string.
// The return value is the address of a []bool variable that stores the value of the flag.
func BoolSlice(name string, value []bool, usage string) *[]bool {
return CommandLine.BoolSliceP(name, "", value, usage)
}
// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
return CommandLine.BoolSliceP(name, shorthand, value, usage)
}
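A short sketch of the new BoolSlice API defined above, assuming a hypothetical --checks flag.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// --checks accepts a comma-separated list of booleans.
	checks := fs.BoolSlice("checks", []bool{true}, "list of boolean toggles")
	if err := fs.Parse([]string{"--checks=true,false,true"}); err != nil {
		panic(err)
	}
	fmt.Println(*checks) // [true false true]
}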


@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
return p
}
// Count like Count only the flag is placed on the CommandLine isntead of a given flag set
// Count defines a count flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage)
}
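A brief sketch of the count flag behavior documented above, assuming a hypothetical --verbose/-v flag.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// Each occurrence of -v adds 1 to the counter.
	verbosity := fs.CountP("verbose", "v", "increase verbosity")
	if err := fs.Parse([]string{"-vvv"}); err != nil {
		panic(err)
	}
	fmt.Println(*verbosity) // 3
}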

vendor/github.com/spf13/pflag/flag.go generated vendored

@ -16,9 +16,9 @@ pflag is a drop-in replacement of Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
import flag "github.com/ogier/pflag"
import flag "github.com/spf13/pflag"
There is one exception to this: if you directly instantiate the Flag struct
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
@ -134,14 +134,21 @@ type FlagSet struct {
// a custom error handler.
Usage func()
// SortFlags is used to indicate, if user wants to have sorted flags in
// help/usage messages.
SortFlags bool
name string
parsed bool
actual map[NormalizedName]*Flag
orderedActual []*Flag
sortedActual []*Flag
formal map[NormalizedName]*Flag
orderedFormal []*Flag
sortedFormal []*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
output io.Writer // nil means stderr; use out() accessor
interspersed bool // allow interspersed option/non-option args
@ -156,7 +163,7 @@ type Flag struct {
Value Value // value as set
DefValue string // default value (as text); for usage message
Changed bool // If the user set the value (or if left to default)
NoOptDefVal string //default value (as text); if the flag is on the command line without any options
NoOptDefVal string // default value (as text); if the flag is on the command line without any options
Deprecated string // If this flag is deprecated, this string is the new or now thing to use
Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use
@ -194,11 +201,13 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
for k, v := range f.formal {
delete(f.formal, k)
nname := f.normalizeFlagName(string(k))
f.formal[nname] = v
f.sortedFormal = f.sortedFormal[:0]
for k, v := range f.orderedFormal {
delete(f.formal, NormalizedName(v.Name))
nname := f.normalizeFlagName(v.Name)
v.Name = string(nname)
f.formal[nname] = v
f.orderedFormal[k] = v
}
}
@ -229,10 +238,25 @@ func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// VisitAll visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
for _, flag := range sortFlags(f.formal) {
if len(f.formal) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.formal) != len(f.sortedFormal) {
f.sortedFormal = sortFlags(f.formal)
}
flags = f.sortedFormal
} else {
flags = f.orderedFormal
}
for _, flag := range flags {
fn(flag)
}
}
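A small sketch of the new SortFlags field in action, with hypothetical flag names; setting it to false makes VisitAll (and help output) follow definition order instead of lexicographical order.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// With SortFlags set to false, VisitAll walks flags in definition order.
	fs.SortFlags = false
	fs.String("zeta", "", "defined first")
	fs.String("alpha", "", "defined second")
	fs.VisitAll(func(f *pflag.Flag) { fmt.Println(f.Name) }) // zeta, then alpha
}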
@ -253,22 +277,39 @@ func (f *FlagSet) HasAvailableFlags() bool {
return false
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
// VisitAll visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// Visit visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
for _, flag := range sortFlags(f.actual) {
if len(f.actual) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.actual) != len(f.sortedActual) {
f.sortedActual = sortFlags(f.actual)
}
flags = f.sortedActual
} else {
flags = f.orderedActual
}
for _, flag := range flags {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
// Visit visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
@ -278,6 +319,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name))
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
// It panics, if len(name) > 1.
func (f *FlagSet) ShorthandLookup(name string) *Flag {
if name == "" {
return nil
}
if len(name) > 1 {
msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
c := name[0]
return f.shorthands[c]
}
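A minimal sketch of ShorthandLookup with an assumed -f/--force flag.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.BoolP("force", "f", false, "force the operation")
	// Look a flag up by its one-letter shorthand.
	if fl := fs.ShorthandLookup("f"); fl != nil {
		fmt.Println(fl.Name) // force
	}
}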
// lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name]
@ -319,7 +376,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.Deprecated = usageMessage
@ -334,7 +391,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.ShorthandDeprecated = usageMessage
@ -358,6 +415,12 @@ func Lookup(name string) *Flag {
return CommandLine.Lookup(name)
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
func ShorthandLookup(name string) *Flag {
return CommandLine.ShorthandLookup(name)
}
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name)
@ -365,17 +428,28 @@ func (f *FlagSet) Set(name, value string) error {
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
err := flag.Value.Set(value)
if err != nil {
return err
var flagName string
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
} else {
flagName = fmt.Sprintf("--%s", flag.Name)
}
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[normalName] = flag
f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
if flag.Deprecated != "" {
fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
return nil
}
@ -487,31 +561,98 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
return
}
// FlagUsages Returns a string containing the usage information for all flags in
// the FlagSet
func (f *FlagSet) FlagUsages() string {
x := new(bytes.Buffer)
// Splits the string `s` on whitespace into an initial substring up to
// `i` runes in length and the remainder. Will go `slop` over `i` if
// that encompasses the entire string (which allows the caller to
// avoid short orphan words on the final line).
func wrapN(i, slop int, s string) (string, string) {
if i+slop > len(s) {
return s, ""
}
w := strings.LastIndexAny(s[:i], " \t")
if w <= 0 {
return s, ""
}
return s[:w], s[w+1:]
}
// Wraps the string `s` to a maximum width `w` with leading indent
// `i`. The first line is not indented (this is assumed to be done by
// caller). Pass `w` == 0 to do no wrapping
func wrap(i, w int, s string) string {
if w == 0 {
return s
}
// space between indent i and end of line width w into which
// we should wrap the text.
wrap := w - i
var r, l string
// Not enough space for sensible wrapping. Wrap as a block on
// the next line instead.
if wrap < 24 {
i = 16
wrap = w - i
r += "\n" + strings.Repeat(" ", i)
}
// If still not enough space then don't even try to wrap.
if wrap < 24 {
return s
}
// Try to avoid short orphan words on the final line, by
// allowing wrapN to go a bit over if that would fit in the
// remainder of the line.
slop := 5
wrap = wrap - slop
// Handle first line, which is indented by the caller (or the
// special case above)
l, s = wrapN(wrap, slop, s)
r = r + l
// Now wrap the rest
for s != "" {
var t string
t, s = wrapN(wrap, slop, s)
r = r + "\n" + strings.Repeat(" ", i) + t
}
return r
}
// FlagUsagesWrapped returns a string containing the usage information
// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
// wrapping)
func (f *FlagSet) FlagUsagesWrapped(cols int) string {
buf := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal))
maxlen := 0
f.VisitAll(func(flag *Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
if flag.Deprecated != "" || flag.Hidden {
return
}
line := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
} else {
line = fmt.Sprintf(" --%s", flag.Name)
}
varname, usage := UnquoteUsage(flag)
if len(varname) > 0 {
if varname != "" {
line += " " + varname
}
if len(flag.NoOptDefVal) > 0 {
if flag.NoOptDefVal != "" {
switch flag.Value.Type() {
case "string":
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
@ -534,7 +675,7 @@ func (f *FlagSet) FlagUsages() string {
line += usage
if !flag.defaultIsZeroValue() {
if flag.Value.Type() == "string" {
line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
line += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
line += fmt.Sprintf(" (default %s)", flag.DefValue)
}
@ -546,10 +687,17 @@ func (f *FlagSet) FlagUsages() string {
for _, line := range lines {
sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx)
fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:])
// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
}
return x.String()
return buf.String()
}
// FlagUsages returns a string containing the usage information for all flags in
// the FlagSet
func (f *FlagSet) FlagUsages() string {
return f.FlagUsagesWrapped(0)
}
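A short sketch of the new wrapping support, using an assumed --endpoint flag.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.String("endpoint", "http://127.0.0.1:2379", "gRPC endpoint; a long usage string like this one is wrapped at the requested column")
	// Wrap the generated help text at 80 columns; passing 0 disables wrapping.
	fmt.Print(fs.FlagUsagesWrapped(80))
}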
// PrintDefaults prints to standard error the default values of all defined command-line flags.
@ -635,16 +783,15 @@ func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
_ = f.VarPF(value, name, shorthand, usage)
f.VarPF(value, name, shorthand, usage)
}
// AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) {
// Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name)
_, alreadythere := f.formal[normalizedFlagName]
if alreadythere {
_, alreadyThere := f.formal[normalizedFlagName]
if alreadyThere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names
@ -655,28 +802,31 @@ func (f *FlagSet) AddFlag(flag *Flag) {
flag.Name = string(normalizedFlagName)
f.formal[normalizedFlagName] = flag
f.orderedFormal = append(f.orderedFormal, flag)
if len(flag.Shorthand) == 0 {
if flag.Shorthand == "" {
return
}
if len(flag.Shorthand) > 1 {
fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
panic("shorthand is more than one character")
msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag)
}
c := flag.Shorthand[0]
old, alreadythere := f.shorthands[c]
if alreadythere {
fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
panic("shorthand redefinition")
used, alreadyThere := f.shorthands[c]
if alreadyThere {
msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
f.shorthands[c] = flag
}
// AddFlagSet adds one FlagSet to another. If a flag is already present in f
// the flag from newSet will be ignored
// the flag from newSet will be ignored.
func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil {
return
@ -724,45 +874,18 @@ func (f *FlagSet) usage() {
}
}
func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
if err := flag.Value.Set(value); err != nil {
return f.failf("invalid argument %q for %s: %v", value, origArg, err)
}
// mark as visited for Visit()
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
return nil
}
func containsShorthand(arg, shorthand string) bool {
// filter out flags --<flag_name>
if strings.HasPrefix(arg, "-") {
return false
}
arg = strings.SplitN(arg, "=", 2)[0]
return strings.Contains(arg, shorthand)
}
func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
name := s[2:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
err = f.failf("bad flag syntax: %s", s)
return
}
split := strings.SplitN(name, "=", 2)
name = split[0]
flag, alreadythere := f.formal[f.normalizeFlagName(name)]
if !alreadythere {
flag, exists := f.formal[f.normalizeFlagName(name)]
if !exists {
if name == "help" { // special case for nice help message.
f.usage()
return a, ErrHelp
@ -770,11 +893,12 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error)
err = f.failf("unknown flag: --%s", name)
return
}
var value string
if len(split) == 2 {
// '--flag=arg'
value = split[1]
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '--flag' (arg was optional)
value = flag.NoOptDefVal
} else if len(a) > 0 {
@ -786,55 +910,68 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error)
err = f.failf("flag needs an argument: %s", s)
return
}
err = f.setFlag(flag, value, s)
err = fn(flag, value)
return
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
if strings.HasPrefix(shorthands, "test.") {
return
}
outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
flag, alreadythere := f.shorthands[c]
if !alreadythere {
flag, exists := f.shorthands[c]
if !exists {
if c == 'h' { // special case for nice help message.
f.usage()
err = ErrHelp
return
}
//TODO continue on error
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return
}
var value string
if len(shorthands) > 2 && shorthands[1] == '=' {
// '-f=arg'
value = shorthands[2:]
outShorts = ""
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '-f' (arg was optional)
value = flag.NoOptDefVal
} else if len(shorthands) > 1 {
// '-farg'
value = shorthands[1:]
outShorts = ""
} else if len(args) > 0 {
// '-f arg'
value = args[0]
outArgs = args[1:]
} else {
// '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
err = f.setFlag(flag, value, shorthands)
if flag.ShorthandDeprecated != "" {
fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
err = fn(flag, value)
return
}
func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
shorthands := s[1:]
// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args)
shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil {
return
}
@ -843,7 +980,7 @@ func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error)
return
}
func (f *FlagSet) parseArgs(args []string) (err error) {
func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
for len(args) > 0 {
s := args[0]
args = args[1:]
@ -863,9 +1000,9 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
f.args = append(f.args, args...)
break
}
args, err = f.parseLongArg(s, args)
args, err = f.parseLongArg(s, args, fn)
} else {
args, err = f.parseShortArg(s, args)
args, err = f.parseShortArg(s, args, fn)
}
if err != nil {
return
@ -880,8 +1017,43 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
// The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
if len(arguments) < 0 {
return nil
}
f.args = make([]string, 0, len(arguments))
err := f.parseArgs(arguments)
set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value)
}
err := f.parseArgs(arguments, set)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
return err
case ExitOnError:
os.Exit(2)
case PanicOnError:
panic(err)
}
}
return nil
}
type parseFunc func(flag *Flag, value string) error
// ParseAll parses flag definitions from the argument list, which should not
// include the command name. The arguments for fn are flag and value. Must be
// called after all flags in the FlagSet are defined and before flags are
// accessed by the program. The return value will be ErrHelp if -help was set
// but not defined.
func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
f.parsed = true
f.args = make([]string, 0, len(arguments))
err := f.parseArgs(arguments, fn)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
@ -907,6 +1079,14 @@ func Parse() {
CommandLine.Parse(os.Args[1:])
}
// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
// The arguments for fn are flag and value. Must be called after all flags are
// defined and before flags are accessed by the program.
func ParseAll(fn func(flag *Flag, value string) error) {
// Ignore errors; CommandLine is set for ExitOnError.
CommandLine.ParseAll(os.Args[1:], fn)
}
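A minimal sketch of ParseAll with hypothetical flags; the callback receives each flag/value pair in command-line order.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.String("name", "", "a name")
	fs.Bool("dry-run", false, "do not apply changes")
	// The callback sees every flag/value pair as it is parsed.
	err := fs.ParseAll([]string{"--name=etcd", "--dry-run"}, func(flag *pflag.Flag, value string) error {
		fmt.Printf("saw --%s=%s\n", flag.Name, value)
		return fs.Set(flag.Name, value)
	})
	if err != nil {
		panic(err)
	}
}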
// SetInterspersed sets whether to support interspersed option/non-option arguments.
func SetInterspersed(interspersed bool) {
CommandLine.SetInterspersed(interspersed)
@ -920,14 +1100,15 @@ func Parsed() bool {
// CommandLine is the default set of command-line flags, parsed from os.Args.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property.
// NewFlagSet returns a new, empty flag set with the specified name,
// error handling property and SortFlags set to true.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
f := &FlagSet{
name: name,
errorHandling: errorHandling,
argsLenAtDash: -1,
interspersed: true,
SortFlags: true,
}
return f
}


@ -6,13 +6,10 @@ package pflag
import (
goflag "flag"
"fmt"
"reflect"
"strings"
)
var _ = fmt.Print
// flagValueWrapper implements pflag.Value around a flag.Value. The main
// difference here is the addition of the Type method that returns a string
// name of the type. As this is generally unknown, we approximate that with


@ -6,8 +6,6 @@ import (
"strings"
)
var _ = strings.TrimSpace
// -- net.IP value
type ipValue net.IP

vendor/github.com/spf13/pflag/ip_slice.go generated vendored Normal file

@ -0,0 +1,148 @@
package pflag
import (
"fmt"
"io"
"net"
"strings"
)
// -- ipSlice Value
type ipSliceValue struct {
value *[]net.IP
changed bool
}
func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
ipsv := new(ipSliceValue)
ipsv.value = p
*ipsv.value = val
return ipsv
}
// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
func (s *ipSliceValue) Set(val string) error {
// remove all quote characters
rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
// read flag arguments with CSV parser
ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
if err != nil && err != io.EOF {
return err
}
// parse ip values into slice
out := make([]net.IP, 0, len(ipStrSlice))
for _, ipStr := range ipStrSlice {
ip := net.ParseIP(strings.TrimSpace(ipStr))
if ip == nil {
return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
}
out = append(out, ip)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
// Type returns a string that uniquely represents this flag's type.
func (s *ipSliceValue) Type() string {
return "ipSlice"
}
// String defines a "native" format for this net.IP slice flag value.
func (s *ipSliceValue) String() string {
ipStrSlice := make([]string, len(*s.value))
for i, ip := range *s.value {
ipStrSlice[i] = ip.String()
}
out, _ := writeAsCSV(ipStrSlice)
return "[" + out + "]"
}
func ipSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []net.IP{}, nil
}
ss := strings.Split(val, ",")
out := make([]net.IP, len(ss))
for i, sval := range ss {
ip := net.ParseIP(strings.TrimSpace(sval))
if ip == nil {
return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
}
out[i] = ip
}
return out, nil
}
// GetIPSlice returns the []net.IP value of a flag with the given name
func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
if err != nil {
return []net.IP{}, err
}
return val.([]net.IP), nil
}
// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string.
// The argument p points to a []net.IP variable in which to store the value of the flag.
func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
f.VarP(newIPSliceValue(value, p), name, "", usage)
}
// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
}
// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
// The argument p points to a []net.IP variable in which to store the value of the flag.
func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
}
// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
}
// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
// The return value is the address of a []net.IP variable that stores the value of that flag.
func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
p := []net.IP{}
f.IPSliceVarP(&p, name, "", value, usage)
return &p
}
// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
p := []net.IP{}
f.IPSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
// The return value is the address of a []net.IP variable that stores the value of the flag.
func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
return CommandLine.IPSliceP(name, "", value, usage)
}
// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
return CommandLine.IPSliceP(name, shorthand, value, usage)
}
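A short sketch of the new IPSlice API with an assumed --peers flag.

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	peers := fs.IPSlice("peers", []net.IP{}, "comma-separated list of peer IPs")
	if err := fs.Parse([]string{"--peers=10.0.0.1,10.0.0.2"}); err != nil {
		panic(err)
	}
	fmt.Println(*peers) // [10.0.0.1 10.0.0.2]
}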


@ -27,8 +27,6 @@ func (*ipNetValue) Type() string {
return "ipNet"
}
var _ = strings.TrimSpace
func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
*p = val
return (*ipNetValue)(p)


@ -1,11 +1,5 @@
package pflag
import (
"fmt"
)
var _ = fmt.Fprint
// -- stringArray Value
type stringArrayValue struct {
value *[]string


@ -3,12 +3,9 @@ package pflag
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
)
var _ = fmt.Fprint
// -- stringSlice Value
type stringSliceValue struct {
value *[]string
@ -39,7 +36,7 @@ func writeAsCSV(vals []string) (string, error) {
return "", err
}
w.Flush()
return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
return strings.TrimSuffix(b.String(), "\n"), nil
}
func (s *stringSliceValue) Set(val string) error {

vendor/github.com/spf13/pflag/uint_slice.go generated vendored Normal file

@ -0,0 +1,126 @@
package pflag
import (
"fmt"
"strconv"
"strings"
)
// -- uintSlice Value
type uintSliceValue struct {
value *[]uint
changed bool
}
func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
uisv := new(uintSliceValue)
uisv.value = p
*uisv.value = val
return uisv
}
func (s *uintSliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return err
}
out[i] = uint(u)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *uintSliceValue) Type() string {
return "uintSlice"
}
func (s *uintSliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%d", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func uintSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []uint{}, nil
}
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return nil, err
}
out[i] = uint(u)
}
return out, nil
}
// GetUintSlice returns the []uint value of a flag with the given name.
func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
if err != nil {
return []uint{}, err
}
return val.([]uint), nil
}
// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
// The argument p points to a []uint variable in which to store the value of the flag.
func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSliceVar defines a uint[] flag with specified name, default value, and usage string.
// The argument p points to a uint[] variable in which to store the value of the flag.
func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, "", value, usage)
return &p
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func UintSlice(name string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, "", value, usage)
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, shorthand, value, usage)
}
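A short sketch of the new UintSlice API with an assumed --ports flag.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	ports := fs.UintSlice("ports", []uint{2379}, "comma-separated list of ports")
	if err := fs.Parse([]string{"--ports=2379,2380"}); err != nil {
		panic(err)
	}
	fmt.Println(*ports) // [2379 2380]
}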


@ -28,8 +28,7 @@ import (
// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
// for details on how to do your own health checking.
//
// On App Engine Standard it ensures the server has started and is prepared to
// receive requests.
// Main is not yet supported on App Engine Standard.
//
// Main never returns.
//


@ -32,8 +32,7 @@ import (
)
const (
apiPath = "/rpc_http"
defaultTicketSuffix = "/default.20150612t184001.0"
apiPath = "/rpc_http"
)
var (
@ -61,9 +60,6 @@ var (
Dial: limitDial,
},
}
defaultTicketOnce sync.Once
defaultTicket string
)
func apiURL() *url.URL {
@ -227,8 +223,6 @@ type context struct {
var contextKey = "holds a *context"
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) *context {
c, _ := ctx.Value(&contextKey).(*context)
return c
@ -272,24 +266,6 @@ func WithContext(parent netcontext.Context, req *http.Request) netcontext.Contex
return withContext(parent, c)
}
// DefaultTicket returns a ticket used for background context or dev_appserver.
func DefaultTicket() string {
defaultTicketOnce.Do(func() {
if IsDevAppServer() {
defaultTicket = "testapp" + defaultTicketSuffix
return
}
appID := partitionlessAppID()
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
majVersion := VersionID(nil)
if i := strings.Index(majVersion, "."); i > 0 {
majVersion = majVersion[:i]
}
defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
})
return defaultTicket
}
func BackgroundContext() netcontext.Context {
ctxs.Lock()
defer ctxs.Unlock()
@ -299,7 +275,13 @@ func BackgroundContext() netcontext.Context {
}
// Compute background security ticket.
ticket := DefaultTicket()
appID := partitionlessAppID()
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
majVersion := VersionID(nil)
if i := strings.Index(majVersion, "."); i > 0 {
majVersion = majVersion[:i]
}
ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
ctxs.bg = &context{
req: &http.Request{
@ -470,7 +452,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errNotAppEngineContext
return errors.New("not an App Engine context")
}
// Apply transaction modifications if we're in a transaction.
@ -493,16 +475,6 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
}
ticket := c.req.Header.Get(ticketHeader)
// Use a test ticket under test environment.
if ticket == "" {
if appid := ctx.Value(&appIDOverrideKey); appid != nil {
ticket = appid.(string) + defaultTicketSuffix
}
}
// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
if ticket == "" {
ticket = DefaultTicket()
}
req := &remotepb.Request{
ServiceName: &service,
Method: &method,
@ -578,9 +550,6 @@ var logLevelName = map[int64]string{
}
func logf(c *context, level int64, format string, args ...interface{}) {
if c == nil {
panic("not an App Engine context")
}
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
c.addLogLine(&logpb.UserAppLogLine{


@ -22,20 +22,14 @@ import (
var contextKey = "holds an appengine.Context"
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}
// This is only for classic App Engine adapters.
func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
c := fromContext(ctx)
if c == nil {
return nil, errNotAppEngineContext
}
return c, nil
func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
return fromContext(ctx)
}
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
@ -104,7 +98,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errNotAppEngineContext
return errors.New("not an App Engine context")
}
// Apply transaction modifications if we're in a transaction.


@ -5,15 +5,10 @@
package internal
import (
"errors"
"os"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)
var errNotAppEngineContext = errors.New("not an App Engine context")
type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
var callOverrideKey = "holds []CallOverrideFunc"
@ -82,42 +77,10 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{
f(level, format, args...)
return
}
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
logf(c, level, format, args...)
logf(fromContext(ctx), level, format, args...)
}
// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
return withNamespace(ctx, namespace)
}
// SetTestEnv sets the env variables for testing background ticket in Flex.
func SetTestEnv() func() {
var environ = []struct {
key, value string
}{
{"GAE_LONG_APP_ID", "my-app-id"},
{"GAE_MINOR_VERSION", "067924799508853122"},
{"GAE_MODULE_INSTANCE", "0"},
{"GAE_MODULE_NAME", "default"},
{"GAE_MODULE_VERSION", "20150612t184001"},
}
for _, v := range environ {
old := os.Getenv(v.key)
os.Setenv(v.key, v.value)
v.value = old
}
return func() { // Restore old environment after the test completes.
for _, v := range environ {
if v.value == "" {
os.Unsetenv(v.key)
continue
}
os.Setenv(v.key, v.value)
}
}
}


@ -13,45 +13,15 @@ import (
)
func DefaultVersionHostname(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.DefaultVersionHostname(c)
return appengine.DefaultVersionHostname(fromContext(ctx))
}
func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
func ServerSoftware() string { return appengine.ServerSoftware() }
func InstanceID() string { return appengine.InstanceID() }
func IsDevAppServer() bool { return appengine.IsDevAppServer() }
func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
func ServerSoftware() string { return appengine.ServerSoftware() }
func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
func InstanceID() string { return appengine.InstanceID() }
func IsDevAppServer() bool { return appengine.IsDevAppServer() }
func RequestID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.RequestID(c)
}
func ModuleName(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.ModuleName(c)
}
func VersionID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.VersionID(c)
}
func fullyQualifiedAppID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return c.FullyQualifiedAppID()
}
func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }


@ -23,11 +23,7 @@ const (
)
func ctxHeaders(ctx netcontext.Context) http.Header {
c := fromContext(ctx)
if c == nil {
return nil
}
return c.Request().Header
return fromContext(ctx).Request().Header
}
func DefaultVersionHostname(ctx netcontext.Context) string {


@ -22,11 +22,7 @@ func Main() {
port = s
}
host := ""
if IsDevAppServer() {
host = "127.0.0.1"
}
if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
log.Fatalf("http.ListenAndServe: %v", err)
}
}

vendor/google.golang.org/genproto/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,143 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
/*
Package status is a generated protocol buffer package.
It is generated from these files:
google/rpc/status.proto
It has these top-level messages:
Status
*/
package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` which can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting purpose.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There will be a
// common set of message types for APIs to use.
Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
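For illustration only, a minimal usage sketch for the generated Status message above; the import path is an assumption and may differ in this vendor tree, and the code/message values are placeholders.

package main

import (
	"fmt"

	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	// Code 5 corresponds to NOT_FOUND in google.rpc.Code; Details is left empty.
	s := &spb.Status{Code: 5, Message: "key not found"}
	fmt.Println(s.GetCode(), s.GetMessage())
}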

View File

@ -35,6 +35,7 @@ package grpc
import (
"fmt"
"net"
"sync"
"golang.org/x/net/context"
@ -60,6 +61,10 @@ type BalancerConfig struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. A Balancer implementation can
// ignore this if it does not need to talk to a remote balancer.
Dialer func(context.Context, string) (net.Conn, error)
}
// BalancerGetOptions configures a Get call.
@ -385,6 +390,9 @@ func (rr *roundRobin) Notify() <-chan []Address {
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
if rr.done {
return errBalancerClosed
}
rr.done = true
if rr.w != nil {
rr.w.Close()

109
vendor/google.golang.org/grpc/call.go generated vendored
View File

@ -43,6 +43,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@ -72,14 +73,17 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
}
for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil {
if c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
break
}
return
}
}
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
@ -92,11 +96,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
defer func() {
if err != nil {
// If err is connection error, t will be closed, no need to close stream here.
@ -119,7 +119,13 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
}
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err)
return err
}
if c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(outBuf) > *c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
}
err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil {
@ -130,10 +136,10 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
if err != nil && err != io.EOF {
return nil, err
return err
}
// Sent successfully.
return stream, nil
return nil
}
// Invoke sends the RPC request on the wire and returns after response is received.
@ -148,14 +154,18 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
defer cancel()
}
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil && *mc.Timeout >= 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
defer cancel()
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
@ -166,6 +176,10 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
o.after(&c)
}
}()
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
@ -182,26 +196,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
}()
}
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
sh.HandleRPC(ctx, end)
}
}()
}()
}
topts := &transport.Options{
Last: true,
Delay: false,
@ -223,6 +236,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
@ -230,7 +246,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
if _, ok := status.FromError(err); ok {
return err
}
if err == errConnClosing || err == errConnUnavailable {
@ -245,19 +261,35 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
stream, err = t.NewStream(ctx, callHdr)
if err != nil {
if put != nil {
if _, ok := err.(transport.ConnectionError); ok {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
put()
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
}
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
// Retry a non-failfast RPC when
// i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -265,13 +297,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -281,9 +313,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
t.CloseStream(stream, nil)
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
return stream.Status().Err()
}
}

View File

@ -36,7 +36,6 @@ package grpc
import (
"errors"
"fmt"
"math"
"net"
"strings"
"sync"
@ -57,8 +56,7 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
// removed in Q1 2017.
// DEPRECATED: Please use context.DeadlineExceeded instead.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security
@ -80,7 +78,8 @@ var (
errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable")
errNoAddr = errors.New("grpc: there is no address available to dial")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
@ -88,30 +87,54 @@ var (
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
maxMsgSize int
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
callOptions []CallOption
}
const defaultClientMaxMsgSize = math.MaxInt32
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = 1024 * 1024 * 4
)
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
func WithMaxMsgSize(s int) DialOption {
// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.maxMsgSize = s
o.copts.InitialWindowSize = s
}
}
// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialConnWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
}
}
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}
// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
func WithDefaultCallOptions(cos ...CallOption) DialOption {
return func(o *dialOptions) {
o.callOptions = append(o.callOptions, cos...)
}
}
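A short sketch of the replacement suggested above for the deprecated WithMaxMsgSize: set per-call defaults at dial time with WithDefaultCallOptions. The target address and the 16 MiB limit are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:2379", // placeholder target
		grpc.WithInsecure(),
		// Raise the per-call receive limit for every RPC on this connection.
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}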
@ -206,7 +229,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption
}
// WithPerRPCCredentials returns a DialOption which sets
// credentials which will place auth state on each outbound RPC.
// credentials and places auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
@ -243,7 +266,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
}
}
// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors.
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false.
@ -297,22 +320,29 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
}
// DialContext creates a client connection to the given target. ctx can be used to
// cancel or expire the pending connecting. Once this function returns, the
// cancel or expire the pending connection. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns.
// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
conns: make(map[Address]*addrConn),
}
cc.ctx, cc.cancel = context.WithCancel(context.Background())
cc.dopts.maxMsgSize = defaultClientMaxMsgSize
for _, opt := range opts {
opt(&cc.dopts)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
return dialContext(ctx, "tcp", addr)
},
)
}
grpcUA := "grpc-go/" + Version
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
@ -337,15 +367,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
scSet := false
if cc.dopts.scChan != nil {
// Wait for the initial service config.
// Try to get an initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
scSet = true
}
case <-ctx.Done():
return nil, ctx.Err()
default:
}
}
// Set defaults.
@ -361,53 +392,44 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else {
colonPos := strings.LastIndex(target, ":")
if colonPos == -1 {
colonPos = len(target)
}
cc.authority = target[:colonPos]
cc.authority = target
}
var ok bool
waitC := make(chan error, 1)
go func() {
var addrs []Address
defer close(waitC)
if cc.dopts.balancer == nil && cc.sc.LB != nil {
cc.dopts.balancer = cc.sc.LB
}
if cc.dopts.balancer == nil {
// Connect to target directly if balancer is nil.
addrs = append(addrs, Address{Addr: target})
} else {
if cc.dopts.balancer != nil {
var credsClone credentials.TransportCredentials
if creds != nil {
credsClone = creds.Clone()
}
config := BalancerConfig{
DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
}
if err := cc.dopts.balancer.Start(target, config); err != nil {
waitC <- err
return
}
ch := cc.dopts.balancer.Notify()
if ch == nil {
// There is no name resolver installed.
addrs = append(addrs, Address{Addr: target})
} else {
addrs, ok = <-ch
if !ok || len(addrs) == 0 {
waitC <- errNoAddr
return
if ch != nil {
if cc.dopts.block {
doneChan := make(chan struct{})
go cc.lbWatcher(doneChan)
<-doneChan
} else {
go cc.lbWatcher(nil)
}
}
}
for _, a := range addrs {
if err := cc.resetAddrConn(a, false, nil); err != nil {
waitC <- err
return
}
}
close(waitC)
// No balancer, or no resolver within the balancer. Connect directly.
if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
waitC <- err
return
}
}()
select {
case <-ctx.Done():
@ -417,16 +439,21 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return nil, err
}
}
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
// The lbWatcher goroutine will not be created.
if ok {
go cc.lbWatcher()
if cc.dopts.scChan != nil && !scSet {
// Blocking wait for the initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
if cc.dopts.scChan != nil {
go cc.scWatcher()
}
return cc, nil
}
@ -475,9 +502,14 @@ type ClientConn struct {
mu sync.RWMutex
sc ServiceConfig
conns map[Address]*addrConn
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
}
func (cc *ClientConn) lbWatcher() {
// lbWatcher watches the Notify channel of the balancer in cc and manages
// connections accordingly. If doneChan is not nil, it is closed after the
// first successful connection is made.
func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
for addrs := range cc.dopts.balancer.Notify() {
var (
add []Address // Addresses need to setup connections.
@ -504,7 +536,15 @@ func (cc *ClientConn) lbWatcher() {
}
cc.mu.Unlock()
for _, a := range add {
cc.resetAddrConn(a, true, nil)
if doneChan != nil {
err := cc.resetAddrConn(a, true, nil)
if err == nil {
close(doneChan)
doneChan = nil
}
} else {
cc.resetAddrConn(a, false, nil)
}
}
for _, c := range del {
c.tearDown(errConnDrain)
@ -533,12 +573,15 @@ func (cc *ClientConn) scWatcher() {
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error {
ac := &addrConn{
cc: cc,
addr: addr,
dopts: cc.dopts,
}
cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = cc.mkp
cc.mu.RUnlock()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
ac.stateCV = sync.NewCond(&ac.mu)
if EnableTracing {
@ -583,8 +626,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
stale.tearDown(tearDownErr)
}
}
// skipWait may overwrite the decision in ac.dopts.block.
if ac.dopts.block && !skipWait {
if block {
if err := ac.resetTransport(false); err != nil {
if err != errConnClosing {
// Tear down ac and delete it from cc.conns.
@ -617,12 +659,23 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil
}
// TODO: Avoid the locking here.
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e. /service/). If there is a default MethodConfig for
// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
m, ok = cc.sc.Methods[method]
return
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
m, _ = cc.sc.Methods[method[:i+1]]
}
return m
}
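A tiny standalone illustration (not part of the library) of the fallback rule described above: the exact method key is tried first, then the per-service default key ending in "/". The method name is hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	method := "/etcdserverpb.KV/Range" // hypothetical full method name
	i := strings.LastIndex(method, "/")
	fmt.Println("exact key:  ", method)       // looked up first
	fmt.Println("default key:", method[:i+1]) // "/etcdserverpb.KV/", used as fallback
}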
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
@ -663,6 +716,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
}
if !ok {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, errConnClosing
@ -670,6 +724,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, err
@ -721,6 +776,20 @@ type addrConn struct {
tearDownErr error
}
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
ac.cc.mkp.Time = v
}
ac.cc.mu.Unlock()
}
}
// printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) {
@ -829,11 +898,14 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
}
ac.mu.Unlock()
closeTransport = false
timer := time.NewTimer(sleepTime - time.Since(connectTime))
select {
case <-time.After(sleepTime - time.Since(connectTime)):
case <-timer.C:
case <-ac.ctx.Done():
timer.Stop()
return ac.ctx.Err()
}
timer.Stop()
continue
}
ac.mu.Lock()
@ -877,6 +949,7 @@ func (ac *addrConn) transportMonitor() {
}
return
case <-t.GoAway():
ac.adjustParams(t.GetGoAwayReason())
// If GoAway happens without any network I/O error, ac is closed without shutting down the
// underlying transport (the transport will be closed when all the pending RPCs finished or
// failed.).
@ -885,9 +958,9 @@ func (ac *addrConn) transportMonitor() {
// In both cases, a new ac is created.
select {
case <-t.Error():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
default:
ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
ac.cc.resetAddrConn(ac.addr, false, errConnDrain)
}
return
case <-t.Error():
@ -896,7 +969,8 @@ func (ac *addrConn) transportMonitor() {
t.Close()
return
case <-t.GoAway():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.adjustParams(t.GetGoAwayReason())
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
return
default:
}

119
vendor/google.golang.org/grpc/codec.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"math"
"sync"
"github.com/golang/protobuf/proto"
)
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct {
}
type cachedProtoBuffer struct {
lastMarshaledSize uint32
proto.Buffer
}
func capToMaxInt32(val int) uint32 {
if val > math.MaxInt32 {
return uint32(math.MaxInt32)
}
return uint32(val)
}
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
protoMsg := v.(proto.Message)
newSlice := make([]byte, 0, cb.lastMarshaledSize)
cb.SetBuf(newSlice)
cb.Reset()
if err := cb.Marshal(protoMsg); err != nil {
return nil, err
}
out := cb.Bytes()
cb.lastMarshaledSize = capToMaxInt32(len(out))
return out, nil
}
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := p.marshal(v, cb)
// put back buffer and lose the ref to the slice
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return out, err
}
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
v.(proto.Message).Reset()
err := cb.Unmarshal(v.(proto.Message))
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return err
}
func (protoCodec) String() string {
return "proto"
}
var (
protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
)
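The Codec interface above can also be satisfied by user code; below is a minimal, hedged sketch of a pass-through byte codec (the type name is hypothetical, and wiring it into a connection is omitted).

package main

import "fmt"

// rawCodec passes []byte values through unchanged.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = append((*p)[:0], data...)
	return nil
}

func (rawCodec) String() string { return "raw" }

func main() {
	var c rawCodec
	b, _ := c.Marshal([]byte("ping"))
	var out []byte
	_ = c.Unmarshal(b, &out)
	fmt.Println(string(out)) // ping
}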

View File

@ -44,7 +44,7 @@ const (
// OK is returned on success.
OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller).
// Canceled indicates the operation was canceled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is

View File

@ -102,6 +102,10 @@ type TransportCredentials interface {
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
// Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behavior.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
@ -192,14 +196,14 @@ func NewTLS(c *tls.Config) TransportCredentials {
return tc
}
// NewClientTLSFromCert constructs a TLS from the input certificate for client.
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
@ -214,12 +218,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
}
// NewServerTLSFromCert constructs a TLS from the input certificate for server.
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
// NewServerTLSFromFile constructs a TLS from the input certificate file and key
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)

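Following the ClientHandshake guidance above about wrapper errors, a hedged sketch of a handshake error that preserves Temporary() so gRPC keeps retrying; the type name is hypothetical.

package main

import "fmt"

type temporaryHandshakeError struct{ err error }

func (e temporaryHandshakeError) Error() string   { return fmt.Sprintf("handshake: %v", e.err) }
func (e temporaryHandshakeError) Temporary() bool { return true }

func main() {
	e := temporaryHandshakeError{err: fmt.Errorf("connection reset")}
	fmt.Println(e.Error(), e.Temporary())
}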
View File

@ -37,6 +37,7 @@ package oauth
import (
"fmt"
"io/ioutil"
"sync"
"golang.org/x/net/context"
"golang.org/x/oauth2"
@ -132,20 +133,27 @@ func NewComputeEngine() credentials.PerRPCCredentials {
// serviceAccount represents PerRPCCredentials via JWT signing key.
type serviceAccount struct {
mu sync.Mutex
config *jwt.Config
t *oauth2.Token
}
func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
token, err := s.config.TokenSource(ctx).Token()
if err != nil {
return nil, err
func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.t.Valid() {
var err error
s.t, err = s.config.TokenSource(ctx).Token()
if err != nil {
return nil, err
}
}
return map[string]string{
"authorization": token.TokenType + " " + token.AccessToken,
"authorization": s.t.TokenType + " " + s.t.AccessToken,
}, nil
}
func (s serviceAccount) RequireTransportSecurity() bool {
func (s *serviceAccount) RequireTransportSecurity() bool {
return true
}
@ -156,7 +164,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR
if err != nil {
return nil, err
}
return serviceAccount{config: config}, nil
return &serviceAccount{config: config}, nil
}
// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file

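A hedged sketch of wiring the serviceAccount credentials above into a client connection; the key file path, OAuth scope, and target address are placeholders.

package main

import (
	"io/ioutil"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	jsonKey, err := ioutil.ReadFile("service-account.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	rpcCreds, err := oauth.NewServiceAccountFromKey(jsonKey, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	// RequireTransportSecurity is true for these credentials, so dial with TLS.
	conn, err := grpc.Dial(
		"example.googleapis.com:443", // placeholder target
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
		grpc.WithPerRPCCredentials(rpcCreds),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}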
113
vendor/google.golang.org/grpc/go16.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
// +build go1.6,!go1.7
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"fmt"
"io"
"net"
"net/http"
"os"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
"golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req.Cancel = ctx.Done()
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Internal, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}

113
vendor/google.golang.org/grpc/go17.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
// +build go1.7
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"context"
"io"
"net"
"net/http"
"os"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return err
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Internal, e.Desc)
default:
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled, netctx.Canceled:
return codes.Canceled
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}

765
vendor/google.golang.org/grpc/grpclb.go generated vendored Normal file
View File

@ -0,0 +1,765 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
)
// Client API for LoadBalancer service.
// Mostly copied from the generated pb.go file
// to avoid a circular dependency.
type loadBalancerClient struct {
cc *ClientConn
}
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
desc := &StreamDesc{
StreamName: "BalanceLoad",
ServerStreams: true,
ClientStreams: true,
}
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
if err != nil {
return nil, err
}
x := &balanceLoadClientStream{stream}
return x, nil
}
type balanceLoadClientStream struct {
ClientStream
}
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// AddressType indicates the address type returned by name resolution.
type AddressType uint8
const (
// Backend indicates the server is a backend server.
Backend AddressType = iota
// GRPCLB indicates the server is a grpclb load balancer.
GRPCLB
)
// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
// name resolver used by the grpclb balancer is required to provide this type of metadata in
// its address updates.
type AddrMetadataGRPCLB struct {
// AddrType is the type of server (grpc load balancer or backend).
AddrType AddressType
// ServerName is the name of the grpc load balancer. Used for authentication.
ServerName string
}
// NewGRPCLBBalancer creates a grpclb load balancer.
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
return &balancer{
r: r,
}
}
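A hedged sketch of the metadata a name resolver is expected to attach for the grpclb balancer above; the balancer address and server name are placeholders.

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

func main() {
	update := &naming.Update{
		Op:   naming.Add,
		Addr: "10.0.0.1:443", // placeholder grpclb server address
		Metadata: &grpc.AddrMetadataGRPCLB{
			AddrType:   grpc.GRPCLB,
			ServerName: "lb.example.com", // placeholder, used for authentication
		},
	}
	fmt.Printf("%+v\n", update)
}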
type remoteBalancerInfo struct {
addr string
// the server name used for authentication with the remote LB server.
name string
}
// grpclbAddrInfo consists of the information of a backend server.
type grpclbAddrInfo struct {
addr Address
connected bool
// dropForRateLimiting indicates whether this particular request should be
// dropped by the client for rate limiting.
dropForRateLimiting bool
// dropForLoadBalancing indicates whether this particular request should be
// dropped by the client for load balancing.
dropForLoadBalancing bool
}
type balancer struct {
r naming.Resolver
target string
mu sync.Mutex
seq int // a sequence number to make sure addrCh does not get stale addresses.
w naming.Watcher
addrCh chan []Address
rbs []remoteBalancerInfo
addrs []*grpclbAddrInfo
next int
waitCh chan struct{}
done bool
expTimer *time.Timer
rand *rand.Rand
clientStats lbpb.ClientStats
}
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
updates, err := w.Next()
if err != nil {
grpclog.Printf("grpclb: failed to get next addr update from watcher: %v", err)
return err
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return ErrClientConnClosing
}
for _, update := range updates {
switch update.Op {
case naming.Add:
var exist bool
for _, v := range b.rbs {
// TODO: Is the same addr with different server name a different balancer?
if update.Addr == v.addr {
exist = true
break
}
}
if exist {
continue
}
md, ok := update.Metadata.(*AddrMetadataGRPCLB)
if !ok {
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata)
continue
}
switch md.AddrType {
case Backend:
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Printf("The name resolution does not give grpclb addresses")
continue
case GRPCLB:
b.rbs = append(b.rbs, remoteBalancerInfo{
addr: update.Addr,
name: md.ServerName,
})
default:
grpclog.Printf("Received unknow address type %d", md.AddrType)
continue
}
case naming.Delete:
for i, v := range b.rbs {
if update.Addr == v.addr {
copy(b.rbs[i:], b.rbs[i+1:])
b.rbs = b.rbs[:len(b.rbs)-1]
break
}
}
default:
grpclog.Println("Unknown update.Op ", update.Op)
}
}
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
// not a load balancer.
select {
case <-ch:
default:
}
ch <- b.rbs
return nil
}
func (b *balancer) serverListExpire(seq int) {
b.mu.Lock()
defer b.mu.Unlock()
// TODO: gRPC internals do not clear the connections when the server list is stale.
// This means RPCs will keep using the existing server list until b receives a new
// server list, even though the list has expired. Revisit this behavior later.
if b.done || seq < b.seq {
return
}
b.next = 0
b.addrs = nil
// Ask grpc internals to close all the corresponding connections.
b.addrCh <- nil
}
func convertDuration(d *lbpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}
func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
if l == nil {
return
}
servers := l.GetServers()
expiration := convertDuration(l.GetExpirationInterval())
var (
sl []*grpclbAddrInfo
addrs []Address
)
for _, s := range servers {
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
addr := Address{
Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port),
Metadata: &md,
}
sl = append(sl, &grpclbAddrInfo{
addr: addr,
dropForRateLimiting: s.DropForRateLimiting,
dropForLoadBalancing: s.DropForLoadBalancing,
})
addrs = append(addrs, addr)
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
if len(sl) > 0 {
// reset b.next to 0 when replacing the server list.
b.next = 0
b.addrs = sl
b.addrCh <- addrs
if b.expTimer != nil {
b.expTimer.Stop()
b.expTimer = nil
}
if expiration > 0 {
b.expTimer = time.AfterFunc(expiration, func() {
b.serverListExpire(seq)
})
}
}
return
}
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-done:
return
}
b.mu.Lock()
stats := b.clientStats
b.clientStats = lbpb.ClientStats{} // Clear the stats.
b.mu.Unlock()
t := time.Now()
stats.Timestamp = &lbpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
ClientStats: &stats,
},
}); err != nil {
grpclog.Printf("grpclb: failed to send load report: %v", err)
return
}
}
}
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbc.BalanceLoad(ctx)
if err != nil {
grpclog.Printf("grpclb: failed to perform RPC to the remote balancer %v", err)
return
}
b.mu.Lock()
if b.done {
b.mu.Unlock()
return
}
b.mu.Unlock()
initReq := &lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbpb.InitialLoadBalanceRequest{
Name: b.target,
},
},
}
if err := stream.Send(initReq); err != nil {
grpclog.Printf("grpclb: failed to send init request: %v", err)
// TODO: backoff on retry?
return true
}
reply, err := stream.Recv()
if err != nil {
grpclog.Printf("grpclb: failed to recv init response: %v", err)
// TODO: backoff on retry?
return true
}
initResp := reply.GetInitialResponse()
if initResp == nil {
grpclog.Println("grpclb: reply from remote balancer did not include initial response.")
return
}
// TODO: Support delegation.
if initResp.LoadBalancerDelegate != "" {
// delegation
grpclog.Println("TODO: Delegation is not supported yet.")
return
}
streamDone := make(chan struct{})
defer close(streamDone)
b.mu.Lock()
b.clientStats = lbpb.ClientStats{} // Clear client stats.
b.mu.Unlock()
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
go b.sendLoadReport(stream, d, streamDone)
}
// Retrieve the server list.
for {
reply, err := stream.Recv()
if err != nil {
grpclog.Printf("grpclb: failed to recv server list: %v", err)
break
}
b.mu.Lock()
if b.done || seq < b.seq {
b.mu.Unlock()
return
}
b.seq++ // tick when receiving a new list of servers.
seq = b.seq
b.mu.Unlock()
if serverList := reply.GetServerList(); serverList != nil {
b.processServerList(serverList, seq)
}
}
return true
}
func (b *balancer) Start(target string, config BalancerConfig) error {
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
// TODO: Fall back to the basic direct connection if there is no name resolver.
if b.r == nil {
return errors.New("there is no name resolver installed")
}
b.target = target
b.mu.Lock()
if b.done {
b.mu.Unlock()
return ErrClientConnClosing
}
b.addrCh = make(chan []Address)
w, err := b.r.Resolve(target)
if err != nil {
b.mu.Unlock()
grpclog.Printf("grpclb: failed to resolve address: %v, err: %v", target, err)
return err
}
b.w = w
b.mu.Unlock()
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
// Spawn a goroutine to monitor the name resolution of remote load balancer.
go func() {
for {
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
grpclog.Printf("grpclb: the naming watcher stops working due to %v.\n", err)
close(balancerAddrsCh)
return
}
}
}()
// Spawn a goroutine to talk to the remote load balancer.
go func() {
var (
cc *ClientConn
// ccError is closed when there is an error in the current cc.
// A new rb should be picked from rbs and connected.
ccError chan struct{}
rb *remoteBalancerInfo
rbs []remoteBalancerInfo
rbIdx int
)
defer func() {
if ccError != nil {
select {
case <-ccError:
default:
close(ccError)
}
}
if cc != nil {
cc.Close()
}
}()
for {
var ok bool
select {
case rbs, ok = <-balancerAddrsCh:
if !ok {
return
}
foundIdx := -1
if rb != nil {
for i, trb := range rbs {
if trb == *rb {
foundIdx = i
break
}
}
}
if foundIdx >= 0 {
if foundIdx >= 1 {
// Move the address in use to the beginning of the list.
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
rbIdx = 0
}
continue // If found, don't dial new cc.
} else if len(rbs) > 0 {
// Pick a random one from the list, instead of always using the first one.
if l := len(rbs); l > 1 && rb != nil {
tmpIdx := b.rand.Intn(l - 1)
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
}
rbIdx = 0
rb = &rbs[0]
} else {
// foundIdx < 0 && len(rbs) <= 0.
rb = nil
}
case <-ccError:
ccError = nil
if rbIdx < len(rbs)-1 {
rbIdx++
rb = &rbs[rbIdx]
} else {
rb = nil
}
}
if rb == nil {
continue
}
if cc != nil {
cc.Close()
}
// Talk to the remote load balancer to get the server list.
var (
err error
dopts []DialOption
)
if creds := config.DialCreds; creds != nil {
if rb.name != "" {
if err := creds.OverrideServerName(rb.name); err != nil {
grpclog.Printf("grpclb: failed to override the server name in the credentials: %v", err)
continue
}
}
dopts = append(dopts, WithTransportCredentials(creds))
} else {
dopts = append(dopts, WithInsecure())
}
if dialer := config.Dialer; dialer != nil {
// WithDialer takes a different type of function, so we instead use a special DialOption here.
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
}
ccError = make(chan struct{})
cc, err = Dial(rb.addr, dopts...)
if err != nil {
grpclog.Printf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
close(ccError)
continue
}
b.mu.Lock()
b.seq++ // tick when getting a new balancer address
seq := b.seq
b.next = 0
b.mu.Unlock()
go func(cc *ClientConn, ccError chan struct{}) {
lbc := &loadBalancerClient{cc}
b.callRemoteBalancer(lbc, seq)
cc.Close()
select {
case <-ccError:
default:
close(ccError)
}
}(cc, ccError)
}
}()
return nil
}
func (b *balancer) down(addr Address, err error) {
b.mu.Lock()
defer b.mu.Unlock()
for _, a := range b.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
func (b *balancer) Up(addr Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return nil
}
var cnt int
for _, a := range b.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
cnt++
}
}
// addr is the only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && b.waitCh != nil {
close(b.waitCh)
b.waitCh = nil
}
return func(err error) {
b.down(addr, err)
}
}
func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
b.mu.Lock()
if b.done {
b.mu.Unlock()
err = ErrClientConnClosing
return
}
seq := b.seq
defer func() {
if err != nil {
return
}
put = func() {
s, ok := rpcInfoFromContext(ctx)
if !ok {
return
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
b.clientStats.NumCallsFinished++
if !s.bytesSent {
b.clientStats.NumCallsFinishedWithClientFailedToSend++
} else if s.bytesReceived {
b.clientStats.NumCallsFinishedKnownReceived++
}
}
}()
b.clientStats.NumCallsStarted++
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(b.addrs) == 0 {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on b.addrs for a failfast RPC.
addr = b.addrs[b.next].addr
b.next++
b.mu.Unlock()
return
}
// Wait on b.waitCh for non-failfast RPCs.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
for {
select {
case <-ctx.Done():
b.mu.Lock()
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ctx.Err()
return
case <-ch:
b.mu.Lock()
if b.done {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "drop requests for the address %s", a.addr.Addr)
return
}
}
if next == b.next {
// Iterated over all the possible addresses but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
}
}
}
func (b *balancer) Notify() <-chan []Address {
return b.addrCh
}
func (b *balancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return errBalancerClosed
}
b.done = true
if b.expTimer != nil {
b.expTimer.Stop()
}
if b.waitCh != nil {
close(b.waitCh)
}
if b.addrCh != nil {
close(b.addrCh)
}
if b.w != nil {
b.w.Close()
}
return nil
}
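// A minimal sketch (not part of the vendored file): the Get path above is a
// drop-aware round robin over a ring of addresses. A standalone version of the
// pick loop, assuming a simplified pickAddr type with a single drop flag:
package main

import "fmt"

type pickAddr struct {
	addr      string
	connected bool
	drop      bool // stands in for dropForRateLimiting || dropForLoadBalancing
}

// pick starts at cursor next, returns the first connected, non-dropping address
// and the advanced cursor; ok is false after a full lap with no candidate.
func pick(addrs []pickAddr, next int) (addr string, newNext int, ok bool) {
	if len(addrs) == 0 {
		return "", next, false
	}
	if next >= len(addrs) {
		next = 0
	}
	i := next
	for {
		a := addrs[i]
		i = (i + 1) % len(addrs)
		if a.connected && !a.drop {
			return a.addr, i, true
		}
		if i == next { // full lap, nothing usable
			return "", next, false
		}
	}
}

func main() {
	ring := []pickAddr{{"10.0.0.1:50051", false, false}, {"10.0.0.2:50051", true, false}}
	if a, _, ok := pick(ring, 0); ok {
		fmt.Println("picked", a)
	}
}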

View File

@ -0,0 +1,629 @@
// Code generated by protoc-gen-go.
// source: grpclb.proto
// DO NOT EDIT!
/*
Package grpc_lb_v1 is a generated protocol buffer package.
It is generated from these files:
grpclb.proto
It has these top-level messages:
Duration
Timestamp
LoadBalanceRequest
InitialLoadBalanceRequest
ClientStats
LoadBalanceResponse
InitialLoadBalanceResponse
ServerList
Server
*/
package grpc_lb_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type LoadBalanceRequest struct {
// Types that are valid to be assigned to LoadBalanceRequestType:
// *LoadBalanceRequest_InitialRequest
// *LoadBalanceRequest_ClientStats
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
}
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceRequest) ProtoMessage() {}
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isLoadBalanceRequest_LoadBalanceRequestType interface {
isLoadBalanceRequest_LoadBalanceRequestType()
}
type LoadBalanceRequest_InitialRequest struct {
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
}
type LoadBalanceRequest_ClientStats struct {
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
}
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
if m != nil {
return m.LoadBalanceRequestType
}
return nil
}
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
return x.InitialRequest
}
return nil
}
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
return x.ClientStats
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
(*LoadBalanceRequest_InitialRequest)(nil),
(*LoadBalanceRequest_ClientStats)(nil),
}
}
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialRequest); err != nil {
return err
}
case *LoadBalanceRequest_ClientStats:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientStats); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceRequest)
switch tag {
case 1: // load_balance_request_type.initial_request
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceRequest)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
return true, err
case 2: // load_balance_request_type.client_stats
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientStats)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
s := proto.Size(x.InitialRequest)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceRequest_ClientStats:
s := proto.Size(x.ClientStats)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceRequest struct {
// Name of the load balanced service (e.g., balancer.service.com). The
// length should be less than 256 bytes.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceRequest) ProtoMessage() {}
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *InitialLoadBalanceRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
// The timestamp of generating the report.
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
// The total number of RPCs that started.
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
// The total number of RPCs that finished.
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
// The total number of RPCs that were dropped by the client because of rate
// limiting.
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
// The total number of RPCs that were dropped by the client because of load
// balancing.
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
// The total number of RPCs that failed to reach a server except dropped RPCs.
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
// The total number of RPCs that finished and are known to have been received
// by a server.
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
}
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ClientStats) GetTimestamp() *Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
func (m *ClientStats) GetNumCallsStarted() int64 {
if m != nil {
return m.NumCallsStarted
}
return 0
}
func (m *ClientStats) GetNumCallsFinished() int64 {
if m != nil {
return m.NumCallsFinished
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForRateLimiting
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForLoadBalancing
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
if m != nil {
return m.NumCallsFinishedWithClientFailedToSend
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
if m != nil {
return m.NumCallsFinishedKnownReceived
}
return 0
}
type LoadBalanceResponse struct {
// Types that are valid to be assigned to LoadBalanceResponseType:
// *LoadBalanceResponse_InitialResponse
// *LoadBalanceResponse_ServerList
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
}
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceResponse) ProtoMessage() {}
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type isLoadBalanceResponse_LoadBalanceResponseType interface {
isLoadBalanceResponse_LoadBalanceResponseType()
}
type LoadBalanceResponse_InitialResponse struct {
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
}
type LoadBalanceResponse_ServerList struct {
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
}
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
if m != nil {
return m.LoadBalanceResponseType
}
return nil
}
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
return x.InitialResponse
}
return nil
}
func (m *LoadBalanceResponse) GetServerList() *ServerList {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
return x.ServerList
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
(*LoadBalanceResponse_InitialResponse)(nil),
(*LoadBalanceResponse_ServerList)(nil),
}
}
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialResponse); err != nil {
return err
}
case *LoadBalanceResponse_ServerList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerList); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceResponse)
switch tag {
case 1: // load_balance_response_type.initial_response
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceResponse)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
return true, err
case 2: // load_balance_response_type.server_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerList)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
s := proto.Size(x.InitialResponse)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceResponse_ServerList:
s := proto.Size(x.ServerList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceResponse struct {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
// load_balancer_delegate and call the BalanceLoad method. Its length should
// be less than 64 bytes.
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
// This interval defines how often the client should send the client stats
// to the load balancer. Stats should only be reported when the duration is
// positive.
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
}
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceResponse) ProtoMessage() {}
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
if m != nil {
return m.LoadBalancerDelegate
}
return ""
}
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
if m != nil {
return m.ClientStatsReportInterval
}
return nil
}
type ServerList struct {
// Contains a list of servers selected by the load balancer. The list will
// be updated when server resolutions change or as needed to balance load
// across more servers. The client should consume the server list in order
// unless instructed otherwise via the client_config.
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
// Indicates the amount of time that the client should consider this server
// list as valid. It may be considered stale after waiting this interval of
// time after receiving the list. If the interval is not positive, the
// client can assume the list is valid until the next list is received.
ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
}
func (m *ServerList) Reset() { *m = ServerList{} }
func (m *ServerList) String() string { return proto.CompactTextString(m) }
func (*ServerList) ProtoMessage() {}
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ServerList) GetServers() []*Server {
if m != nil {
return m.Servers
}
return nil
}
func (m *ServerList) GetExpirationInterval() *Duration {
if m != nil {
return m.ExpirationInterval
}
return nil
}
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
type Server struct {
// A resolved address for the server, serialized in network-byte-order. It may
// either be an IPv4 or IPv6 address.
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
// A resolved port number for the server.
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
// An opaque but printable token given to the frontend for each pick. All
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
//
// Its length is variable but less than 50 bytes.
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for rate limiting.
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for load balancing.
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
}
func (m *Server) Reset() { *m = Server{} }
func (m *Server) String() string { return proto.CompactTextString(m) }
func (*Server) ProtoMessage() {}
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *Server) GetIpAddress() []byte {
if m != nil {
return m.IpAddress
}
return nil
}
func (m *Server) GetPort() int32 {
if m != nil {
return m.Port
}
return 0
}
func (m *Server) GetLoadBalanceToken() string {
if m != nil {
return m.LoadBalanceToken
}
return ""
}
func (m *Server) GetDropForRateLimiting() bool {
if m != nil {
return m.DropForRateLimiting
}
return false
}
func (m *Server) GetDropForLoadBalancing() bool {
if m != nil {
return m.DropForLoadBalancing
}
return false
}
func init() {
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
}
func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 733 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
}
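// A minimal sketch (not part of the vendored file): building and marshaling one
// of the generated messages above. The import path is assumed from the vendored
// layout (google.golang.org/grpc/grpclb/grpc_lb_v1) and may differ.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
)

func main() {
	// The first message a client sends on the BalanceLoad stream carries the
	// target service name inside the load_balance_request_type oneof.
	req := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{Name: "balancer.service.com"},
		},
	}
	b, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("marshaled %d bytes; initial request name: %q\n", len(b), req.GetInitialRequest().GetName())
}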

View File

@ -40,17 +40,17 @@ import (
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API.
// This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API.
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
// This is an EXPERIMENTAL API.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on
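// A minimal sketch (not part of the vendored file): a logging interceptor that
// satisfies the UnaryClientInterceptor signature above; the logUnary name and
// the target address are placeholders. It is installed with
// grpc.WithUnaryInterceptor when dialing.
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	// The interceptor is responsible for calling the invoker to complete the RPC.
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v, err: %v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithUnaryInterceptor(logUnary))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}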

View File

@ -39,8 +39,10 @@ import (
)
// ClientParameters is used to set keepalive parameters on the client-side.
// These configure how the client will actively probe to notice when a connection broken
// and to cause activity so intermediaries are aware the connection is still in use.
// These configure how the client will actively probe to notice when a connection is broken
// and send pings so intermediaries will be aware of the liveness of the connection.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in closing of connection.
type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
Time time.Duration // The current default value is infinity.
@ -48,5 +50,31 @@ type ClientParameters struct {
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
// If true, client runs keepalive checks even with no active RPCs.
PermitWithoutStream bool
PermitWithoutStream bool // false by default.
}
// ServerParameters is used to set keepalive and max-age parameters on the server-side.
type ServerParameters struct {
// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
// The idleness duration is measured from the most recent time the number of outstanding RPCs became zero or from connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
Time time.Duration // The current default value is 2 hours.
// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
}
// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
// Server will close connection with a client that violates this policy.
type EnforcementPolicy struct {
// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
// If true, server expects keepalive pings even when there are no active streams (RPCs).
PermitWithoutStream bool // false by default.
}
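// A minimal sketch (not part of the vendored file): wiring these parameters into
// a server. KeepaliveParams and KeepaliveEnforcementPolicy are the ServerOptions
// added in this vendor bump (see server.go below); the concrete durations are
// arbitrary examples, not recommended values.
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,  // close idle connections with a GoAway
			Time:              2 * time.Hour,    // ping an inactive client after this long
			Timeout:           20 * time.Second, // then wait this long for activity
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute, // reject clients that ping more often than this
			PermitWithoutStream: false,
		}),
	)
}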

View File

@ -36,58 +36,27 @@
package metadata // import "google.golang.org/grpc/metadata"
import (
"encoding/base64"
"fmt"
"strings"
"golang.org/x/net/context"
)
const (
binHdrSuffix = "-bin"
)
// encodeKeyValue encodes key and value qualified for transmission via gRPC.
// Transmitting binary headers violates HTTP/2 spec.
// TODO(zhaoq): Maybe check if k is ASCII also.
func encodeKeyValue(k, v string) (string, string) {
k = strings.ToLower(k)
if strings.HasSuffix(k, binHdrSuffix) {
val := base64.StdEncoding.EncodeToString([]byte(v))
v = string(val)
}
return k, v
}
// DecodeKeyValue returns the original key and value corresponding to the
// encoded data in k, v.
// If k is a binary header and v contains comma, v is split on comma before decoded,
// and the decoded v will be joined with comma before returned.
// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
func DecodeKeyValue(k, v string) (string, string, error) {
if !strings.HasSuffix(k, binHdrSuffix) {
return k, v, nil
}
vvs := strings.Split(v, ",")
for i, vv := range vvs {
val, err := base64.StdEncoding.DecodeString(vv)
if err != nil {
return "", "", err
}
vvs[i] = string(val)
}
return k, strings.Join(vvs, ","), nil
return k, v, nil
}
// MD is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate MD.
type MD map[string][]string
// New creates a MD from given key-value map.
// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be applied Base64 encoding.
// New creates an MD from a given key-value map.
// Keys are automatically converted to lowercase.
func New(m map[string]string) MD {
md := MD{}
for k, v := range m {
key, val := encodeKeyValue(k, v)
for k, val := range m {
key := strings.ToLower(k)
md[key] = append(md[key], val)
}
return md
@ -95,20 +64,19 @@ func New(m map[string]string) MD {
// Pairs returns an MD formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd.
// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be appplied Base64 encoding.
// Keys are automatically converted to lowercase.
func Pairs(kv ...string) MD {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := MD{}
var k string
var key string
for i, s := range kv {
if i%2 == 0 {
k = s
key = strings.ToLower(s)
continue
}
key, val := encodeKeyValue(k, s)
md[key] = append(md[key], val)
md[key] = append(md[key], s)
}
return md
}
@ -123,9 +91,9 @@ func (md MD) Copy() MD {
return Join(md)
}
// Join joins any number of MDs into a single MD.
// Join joins any number of mds into a single MD.
// The order of values for each key is determined by the order in which
// the MDs containing those values are presented to Join.
// the mds containing those values are presented to Join.
func Join(mds ...MD) MD {
out := MD{}
for _, md := range mds {
@ -136,17 +104,41 @@ func Join(mds ...MD) MD {
return out
}
type mdKey struct{}
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
// NewContext creates a new context with md attached.
// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
func NewContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdKey{}, md)
return NewOutgoingContext(ctx, md)
}
// FromContext returns the MD in ctx if it exists.
// The returned md should be immutable, writing to it may cause races.
// Modification should be made to the copies of the returned md.
// NewIncomingContext creates a new context with incoming md attached.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, md)
}
// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
func FromContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdKey{}).(MD)
return FromIncomingContext(ctx)
}
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdIncomingKey{}).(MD)
return
}
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to the copies of the returned MD.
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
return
}
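// A minimal sketch (not part of the vendored file): how the incoming/outgoing
// split above is used. A client attaches metadata to the outgoing context; a
// server handler reads the incoming one. Keys and values are placeholders.
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach metadata to the context that will accompany RPCs.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-request-id", "42"))
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		fmt.Println("outgoing:", md)
	}

	// Server side: handlers read what the transport attached as incoming metadata.
	srvCtx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("x-request-id", "42"))
	if md, ok := metadata.FromIncomingContext(srvCtx); ok {
		fmt.Println("incoming:", md["x-request-id"])
	}
}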

View File

@ -42,7 +42,8 @@ import (
"google.golang.org/grpc/credentials"
)
// Peer contains the information of the peer for an RPC.
// Peer contains the information of the peer for an RPC, such as the address
// and authentication information.
type Peer struct {
// Addr is the peer address.
Addr net.Addr

145
vendor/google.golang.org/grpc/proxy.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package grpc
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"golang.org/x/net/context"
)
var (
// errDisabled indicates that proxy is disabled for the address.
errDisabled = errors.New("proxy is disabled for the address")
// The following variable will be overwritten in the tests.
httpProxyFromEnvironment = http.ProxyFromEnvironment
)
func mapAddress(ctx context.Context, address string) (string, error) {
req := &http.Request{
URL: &url.URL{
Scheme: "https",
Host: address,
},
}
url, err := httpProxyFromEnvironment(req)
if err != nil {
return "", err
}
if url == nil {
return "", errDisabled
}
return url.Host, nil
}
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
// It's possible that this reader reads more than what's needed for the response and stores
// those bytes in the buffer.
// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
// bytes in the buffer.
type bufConn struct {
net.Conn
r io.Reader
}
func (c *bufConn) Read(b []byte) (int, error) {
return c.r.Read(b)
}
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
}
}()
req := (&http.Request{
Method: http.MethodConnect,
URL: &url.URL{Host: addr},
Header: map[string][]string{"User-Agent": {grpcUA}},
})
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
r := bufio.NewReader(conn)
resp, err := http.ReadResponse(r, req)
if err != nil {
return nil, fmt.Errorf("reading server HTTP response: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
return &bufConn{Conn: conn, r: r}, nil
}
// newProxyDialer returns a dialer that connects to proxy first if necessary.
// The returned dialer checks if a proxy is necessary, dials the proxy with the
// provided dialer, does the HTTP CONNECT handshake, and returns the connection.
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
var skipHandshake bool
newAddr, err := mapAddress(ctx, addr)
if err != nil {
if err != errDisabled {
return nil, err
}
skipHandshake = true
newAddr = addr
}
conn, err = dialer(ctx, newAddr)
if err != nil {
return
}
if !skipHandshake {
conn, err = doHTTPConnectHandshake(ctx, conn, addr)
}
return
}
}
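// A minimal sketch (not part of the vendored file): mapAddress above defers to
// http.ProxyFromEnvironment, so the standard HTTPS_PROXY / NO_PROXY variables
// decide whether the dialer goes through the CONNECT handshake. The proxy URL
// and target address here are placeholders.
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
)

func main() {
	os.Setenv("HTTPS_PROXY", "http://proxy.corp.example:3128")

	// Same shape of request that mapAddress builds for a gRPC target address.
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: "etcd.example.com:2379"}}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		log.Fatal(err)
	}
	if proxyURL == nil {
		fmt.Println("proxy disabled for this address; dial directly")
		return
	}
	fmt.Println("dial proxy at", proxyURL.Host, "then CONNECT to the target")
}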

View File

@ -37,48 +37,22 @@ import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"sync"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// Codec defines the interface gRPC uses to encode and decode messages.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct{}
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(v.(proto.Message))
}
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
return proto.Unmarshal(data, v.(proto.Message))
}
func (protoCodec) String() string {
return "proto"
}
// Compressor defines the interface gRPC uses to compress a message.
type Compressor interface {
// Do compresses p into w.
@ -87,16 +61,24 @@ type Compressor interface {
Type() string
}
// NewGZIPCompressor creates a Compressor based on GZIP.
func NewGZIPCompressor() Compressor {
return &gzipCompressor{}
type gzipCompressor struct {
pool sync.Pool
}
type gzipCompressor struct {
// NewGZIPCompressor creates a Compressor based on GZIP.
func NewGZIPCompressor() Compressor {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
return gzip.NewWriter(ioutil.Discard)
},
},
}
}
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
z := gzip.NewWriter(w)
z := c.pool.Get().(*gzip.Writer)
z.Reset(w)
if _, err := z.Write(p); err != nil {
return err
}
@ -116,6 +98,7 @@ type Decompressor interface {
}
type gzipDecompressor struct {
pool sync.Pool
}
// NewGZIPDecompressor creates a Decompressor based on GZIP.
@ -124,11 +107,26 @@ func NewGZIPDecompressor() Decompressor {
}
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z, err := gzip.NewReader(r)
if err != nil {
return nil, err
var z *gzip.Reader
switch maybeZ := d.pool.Get().(type) {
case nil:
newZ, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
z = newZ
case *gzip.Reader:
z = maybeZ
if err := z.Reset(r); err != nil {
d.pool.Put(z)
return nil, err
}
}
defer z.Close()
defer func() {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z)
}
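// A minimal sketch (not part of the vendored file): installing the pooled gzip
// compressor/decompressor above on a client connection via the existing
// WithCompressor and WithDecompressor DialOptions; the target address is a
// placeholder.
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),     // compress outgoing messages
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()), // decompress incoming messages
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}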
@ -138,11 +136,14 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
failFast bool
headerMD metadata.MD
trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go
failFast bool
headerMD metadata.MD
trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
}
var defaultCallInfo = callInfo{failFast: true}
@ -159,6 +160,14 @@ type CallOption interface {
after(*callInfo)
}
// EmptyCallOption does not alter the Call configuration.
// It can be embedded in another structure to carry satellite data for use
// by interceptors.
type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo) {}
type beforeCall func(c *callInfo) error
func (o beforeCall) before(c *callInfo) error { return o(c) }
@ -189,7 +198,9 @@ func Trailer(md *metadata.MD) CallOption {
// unary RPC.
func Peer(peer *peer.Peer) CallOption {
return afterCall(func(c *callInfo) {
*peer = *c.peer
if c.peer != nil {
*peer = *c.peer
}
})
}
@ -198,7 +209,8 @@ func Peer(peer *peer.Peer) CallOption {
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will retry
// the call if it fails due to a transient error. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true.
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
// Note: failFast defaults to true.
func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error {
c.failFast = failFast
@ -206,6 +218,31 @@ func FailFast(failFast bool) CallOption {
})
}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
func MaxCallRecvMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxReceiveMessageSize = &s
return nil
})
}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
func MaxCallSendMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxSendMessageSize = &s
return nil
})
}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
return beforeCall(func(c *callInfo) error {
c.creds = creds
return nil
})
}
// The format of the payload: compressed or not?
type payloadFormat uint8
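// A minimal sketch (not part of the vendored file): combining the per-call
// options above on a single RPC. grpc.Invoke is the low-level unary entry
// point; the method name, addresses, and size limits are placeholders.
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func call(conn *grpc.ClientConn, req, resp interface{}) error {
	return grpc.Invoke(context.Background(), "/my.Service/MyMethod", req, resp, conn,
		grpc.FailFast(false),                 // wait for a ready connection instead of failing fast
		grpc.MaxCallRecvMsgSize(8*1024*1024), // raise the 4MB receive limit for this call
		grpc.MaxCallSendMsgSize(8*1024*1024),
	)
}

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	_ = call // wiring of concrete request/response message types is omitted
}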
@ -239,8 +276,8 @@ type parser struct {
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := p.r.Read(p.header[:]); err != nil {
return 0, nil, err
}
@ -250,13 +287,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
if length == 0 {
return pf, nil, nil
}
if length > uint32(maxMsgSize) {
return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
if length > uint32(maxReceiveMessageSize) {
return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message:
msg = make([]byte, int(length))
if _, err := io.ReadFull(p.r, msg); err != nil {
if _, err := p.r.Read(msg); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
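// A minimal sketch (not part of the vendored file) of the framing recvMsg
// parses above: each gRPC message on the wire is a 5-byte prefix (1-byte
// compression flag, 4-byte big-endian length) followed by the payload.
package main

import (
	"encoding/binary"
	"fmt"
)

func parsePrefix(header [5]byte) (compressed bool, length uint32) {
	return header[0] == 1, binary.BigEndian.Uint32(header[1:])
}

func main() {
	// Prefix for an uncompressed 11-byte payload.
	hdr := [5]byte{0, 0x00, 0x00, 0x00, 0x0b}
	c, n := parsePrefix(hdr)
	fmt.Printf("compressed=%v length=%d\n", c, n)
}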
@ -277,7 +314,7 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
// TODO(zhaoq): optimize to reduce memory alloc and copying.
b, err = c.Marshal(msg)
if err != nil {
return nil, err
return nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if outPayload != nil {
outPayload.Payload = msg
@ -287,14 +324,14 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
}
if cp != nil {
if err := cp.Do(cbuf, b); err != nil {
return nil, err
return nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
b = cbuf.Bytes()
}
length = uint(len(b))
}
if length > math.MaxUint32 {
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
return nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", length)
}
const (
@ -335,8 +372,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er
return nil
}
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxMsgSize)
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
}
@ -352,10 +389,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
if len(d) > maxMsgSize {
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
@ -370,116 +407,57 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return nil
}
// rpcError defines the status from an RPC.
type rpcError struct {
code codes.Code
desc string
type rpcInfo struct {
bytesSent bool
bytesReceived bool
}
func (e *rpcError) Error() string {
return fmt.Sprintf("rpc error: code = %s desc = %s", e.code, e.desc)
type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
return
}
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
if ss, ok := rpcInfoFromContext(ctx); ok {
*ss = s
}
return
}
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
// Deprecated; use status.FromError and Code method instead.
func Code(err error) codes.Code {
if err == nil {
return codes.OK
}
if e, ok := err.(*rpcError); ok {
return e.code
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated; use status.FromError and Message method instead.
func ErrorDesc(err error) string {
if err == nil {
return ""
}
if e, ok := err.(*rpcError); ok {
return e.desc
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
// Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK.
//
// Deprecated; use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error {
if c == codes.OK {
return nil
}
return &rpcError{
code: c,
desc: fmt.Sprintf(format, a...),
}
}
// toRPCErr converts an error into a rpcError.
func toRPCErr(err error) error {
switch e := err.(type) {
case *rpcError:
return err
case transport.StreamError:
return &rpcError{
code: e.Code,
desc: e.Desc,
}
case transport.ConnectionError:
return &rpcError{
code: codes.Internal,
desc: e.Desc,
}
default:
switch err {
case context.DeadlineExceeded:
return &rpcError{
code: codes.DeadlineExceeded,
desc: err.Error(),
}
case context.Canceled:
return &rpcError{
code: codes.Canceled,
desc: err.Error(),
}
case ErrClientConnClosing:
return &rpcError{
code: codes.FailedPrecondition,
desc: err.Error(),
}
}
}
return Errorf(codes.Unknown, "%v", err)
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
return status.Errorf(c, format, a...)
}
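// A minimal sketch (not part of the vendored file): the non-deprecated way to
// inspect an RPC error, using the status package that Code, ErrorDesc, and
// Errorf now delegate to. The error value is constructed locally for
// demonstration only.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", 5242880, 4194304)

	if s, ok := status.FromError(err); ok {
		fmt.Println("code:", s.Code())       // ResourceExhausted
		fmt.Println("message:", s.Message()) // human-readable description
		if s.Code() == codes.ResourceExhausted {
			fmt.Println("consider raising MaxCallRecvMsgSize")
		}
	}
}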
// MethodConfig defines the configuration recommended by the service providers for a
@ -489,24 +467,22 @@ type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here.
WaitForReady bool
WaitForReady *bool
// Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline.
Timeout time.Duration
Timeout *time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized
// payload after per-message compression (but before stream compression) in bytes.
// The actual value used is the minimum of the value specified here and the value set
// by the application via the gRPC client API. If either one is not set, then the other
// will be used. If neither is set, then the built-in default is used.
// TODO: support this.
MaxReqSize uint32
MaxReqSize *int
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
// TODO: support this.
MaxRespSize uint32
MaxRespSize *int
}
// ServiceConfig is provided by the service provider and contains parameters for how
@ -517,9 +493,32 @@ type ServiceConfig struct {
// via grpc.WithBalancer will override this.
LB Balancer
// Methods contains a map for the methods in this service.
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
// Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig
}
func min(a, b *int) *int {
if *a < *b {
return a
}
return b
}
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
if mcMax == nil && doptMax == nil {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
return min(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
}
return doptMax
}
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package.
//
@ -529,4 +528,6 @@ type ServiceConfig struct {
const SupportPackageIsVersion4 = true
// Version is the current grpc version.
const Version = "1.2.1"
const Version = "1.4.2"
const grpcUA = "grpc-go/" + Version

View File

@ -53,12 +53,19 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
"google.golang.org/grpc/transport"
)
const (
defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
defaultServerMaxSendMessageSize = 1024 * 1024 * 4
)
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification.
@ -105,25 +112,63 @@ type Server struct {
}
type options struct {
creds credentials.TransportCredentials
codec Codec
cp Compressor
dc Decompressor
maxMsgSize int
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle
statsHandler stats.Handler
maxConcurrentStreams uint32
useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
creds credentials.TransportCredentials
codec Codec
cp Compressor
dc Decompressor
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle
statsHandler stats.Handler
maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
keepaliveParams keepalive.ServerParameters
keepalivePolicy keepalive.EnforcementPolicy
initialWindowSize int32
initialConnWindowSize int32
}
var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
var defaultServerOptions = options{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
}
// A ServerOption sets options.
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)
// InitialWindowSize returns a ServerOption that sets window size for stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialWindowSize = s
}
}
// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialConnWindowSize = s
}
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
return func(o *options) {
o.keepaliveParams = kp
}
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
o.keepalivePolicy = kep
}
}
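As a hedged usage sketch, the two keepalive options above are typically passed together when constructing a server; the durations below are arbitrary examples, not recommended values.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Server-side keepalive: ping idle clients every 2 minutes and close
	// connections whose pings go unanswered for 20 seconds.
	kasp := keepalive.ServerParameters{
		Time:    2 * time.Minute,
		Timeout: 20 * time.Second,
	}
	// Enforcement policy: reject clients that ping more often than once
	// per minute or that ping while no streams are active.
	kaep := keepalive.EnforcementPolicy{
		MinTime:             time.Minute,
		PermitWithoutStream: false,
	}
	srv := grpc.NewServer(
		grpc.KeepaliveParams(kasp),
		grpc.KeepaliveEnforcementPolicy(kaep),
	)
	defer srv.Stop()
}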
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
@ -145,11 +190,25 @@ func RPCDecompressor(dc Decompressor) ServerOption {
}
}
// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages.
// If this is not set, gRPC uses the default 4MB.
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) {
o.maxMsgSize = m
o.maxReceiveMessageSize = m
}
}
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default 4MB.
func MaxSendMsgSize(m int) ServerOption {
return func(o *options) {
o.maxSendMessageSize = m
}
}
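A minimal sketch of how the receive and send limits above are applied when building a server; the 8 MB and 16 MB figures are placeholders chosen for illustration.

package main

import "google.golang.org/grpc"

func main() {
	// Raise the inbound limit to 8 MB and the outbound limit to 16 MB;
	// anything larger is rejected with a ResourceExhausted status.
	srv := grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024),
		grpc.MaxSendMsgSize(16*1024*1024),
	)
	defer srv.Stop()
}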
@ -174,7 +233,7 @@ func Creds(c credentials.TransportCredentials) ServerOption {
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
if o.unaryInt != nil {
panic("The unary server interceptor has been set.")
panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
@ -185,7 +244,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) {
if o.streamInt != nil {
panic("The stream server interceptor has been set.")
panic("The stream server interceptor was already set and may not be reset.")
}
o.streamInt = i
}
@ -196,7 +255,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
if o.inTapHandle != nil {
panic("The tap handle has been set.")
panic("The tap handle was already set and may not be reset.")
}
o.inTapHandle = h
}
@ -211,7 +270,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the the "unimplemented" gRPC
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function has full access to the Context of the request and the
// stream, and the invocation passes through interceptors.
@ -230,8 +289,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
var opts options
opts.maxMsgSize = defaultMaxMsgSize
opts := defaultServerOptions
for _, o := range opt {
o(&opts)
}
@ -270,8 +328,8 @@ func (s *Server) errorf(format string, a ...interface{}) {
}
}
// RegisterService register a service and its implementation to the gRPC
// server. Called from the IDL generated code. This must be called before
// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
// invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
ht := reflect.TypeOf(sd.HandlerType).Elem()
@ -316,7 +374,7 @@ type MethodInfo struct {
IsServerStream bool
}
// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service.
// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
Methods []MethodInfo
// Metadata is the metadata specified in ServiceDesc when registering service.
@ -409,10 +467,12 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("Accept error: %v; retrying in %v", err, tempDelay)
s.mu.Unlock()
timer := time.NewTimer(tempDelay)
select {
case <-time.After(tempDelay):
case <-timer.C:
case <-s.ctx.Done():
}
timer.Stop()
continue
}
s.mu.Lock()
@ -465,10 +525,14 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
// transport.NewServerTransport).
func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
InitialConnWindowSize: s.opts.initialConnWindowSize,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
@ -600,14 +664,11 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
}
p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil {
// This typically indicates a fatal issue (e.g., memory
// corruption or hardware faults) the application program
// cannot handle.
//
// TODO(zhaoq): There exist other options also such as only closing the
// faulty stream locally and remotely (Other streams can keep going). Find
// the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
grpclog.Println("grpc: server failed to encode response: ", err)
return err
}
if len(p) > s.opts.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(p), s.opts.maxSendMessageSize)
}
err = t.Write(stream, p, opts)
if err == nil && outPayload != nil {
@ -624,9 +685,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@ -634,8 +693,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
}()
}
if trInfo != nil {
defer trInfo.tr.Finish()
trInfo.firstLine.client = false
@ -652,136 +711,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
stream.SetSendCompress(s.opts.cp.Type())
}
p := &parser{r: stream}
for {
pf, req, err := p.recvMsg(s.opts.maxMsgSize)
pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
if err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
} else {
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
}
}
return err
}
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
}
if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always returns an RPC error. Add a return here if necessary.
}
var inPayload *stats.InPayload
if sh != nil {
inPayload = &stats.InPayload{
RecvTime: time.Now(),
}
}
df := func(v interface{}) error {
if inPayload != nil {
inPayload.WireLength = len(req)
}
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil {
return Errorf(codes.Internal, err.Error())
}
}
if len(req) > s.opts.maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with
// java implementation.
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
}
if err := s.opts.codec.Unmarshal(req, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
if inPayload != nil {
inPayload.Payload = v
inPayload.Data = req
inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
return nil
}
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
// Convert appErr if it is not a grpc status error.
appErr = status.Error(convertCode(appErr), appErr.Error())
appStatus, _ = status.FromError(appErr)
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError()
}
if e := t.WriteStatus(stream, appStatus); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return appErr
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
opts := &transport.Options{
Last: true,
Delay: false,
}
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
if err != nil {
switch err := err.(type) {
case *rpcError:
if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
if s, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, s); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil {
if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
}
return err
}
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
switch err := err.(type) {
case *rpcError:
if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
default:
if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always return RPC error. Add a return here if necessary.
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
}
var inPayload *stats.InPayload
if sh != nil {
inPayload = &stats.InPayload{
RecvTime: time.Now(),
}
}
statusCode := codes.OK
statusDesc := ""
df := func(v interface{}) error {
if inPayload != nil {
inPayload.WireLength = len(req)
}
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil {
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
}
return Errorf(codes.Internal, err.Error())
}
}
if len(req) > s.opts.maxMsgSize {
// TODO: Revisit the error code. Currently keep it consistent with
// java implementation.
statusCode = codes.Internal
statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
}
if err := s.opts.codec.Unmarshal(req, v); err != nil {
return err
}
if inPayload != nil {
inPayload.Payload = v
inPayload.Data = req
inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
return nil
}
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil {
if err, ok := appErr.(*rpcError); ok {
statusCode = err.code
statusDesc = err.desc
} else {
statusCode = convertCode(appErr)
statusDesc = appErr.Error()
}
if trInfo != nil && statusCode != codes.OK {
trInfo.tr.LazyLog(stringer(statusDesc), true)
trInfo.tr.SetError()
}
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
}
return Errorf(statusCode, statusDesc)
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
opts := &transport.Options{
Last: true,
Delay: false,
}
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
switch err := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
statusCode = err.Code
statusDesc = err.Desc
default:
statusCode = codes.Unknown
statusDesc = err.Error()
}
return err
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
errWrite := t.WriteStatus(stream, statusCode, statusDesc)
if statusCode != codes.OK {
return Errorf(statusCode, statusDesc)
}
return errWrite
return err
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
// TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
return t.WriteStatus(stream, status.New(codes.OK, ""))
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@ -791,9 +851,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@ -801,21 +859,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
}
}()
}()
}
if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type())
}
ss := &serverStream{
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxMsgSize: s.opts.maxMsgSize,
trInfo: trInfo,
statsHandler: sh,
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
statsHandler: sh,
}
if ss.cp != nil {
ss.cbuf = new(bytes.Buffer)
@ -849,32 +908,32 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
appErr = s.opts.streamInt(server, ss, info, sd.Handler)
}
if appErr != nil {
if err, ok := appErr.(*rpcError); ok {
ss.statusCode = err.code
ss.statusDesc = err.desc
} else if err, ok := appErr.(transport.StreamError); ok {
ss.statusCode = err.Code
ss.statusDesc = err.Desc
} else {
ss.statusCode = convertCode(appErr)
ss.statusDesc = appErr.Error()
appStatus, ok := status.FromError(appErr)
if !ok {
switch err := appErr.(type) {
case transport.StreamError:
appStatus = status.New(err.Code, err.Desc)
default:
appStatus = status.New(convertCode(appErr), appErr.Error())
}
appErr = appStatus.Err()
}
if trInfo != nil {
ss.mu.Lock()
ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
ss.trInfo.tr.SetError()
ss.mu.Unlock()
}
t.WriteStatus(ss.s, appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
}
if trInfo != nil {
ss.mu.Lock()
if ss.statusCode != codes.OK {
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
ss.trInfo.tr.SetError()
} else {
ss.trInfo.tr.LazyLog(stringer("OK"), false)
}
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
if ss.statusCode != codes.OK {
return Errorf(ss.statusCode, ss.statusDesc)
}
return errWrite
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
}
@ -890,7 +949,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -915,7 +974,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -945,7 +1004,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
return
}
errDesc := fmt.Sprintf("unknown method %v", method)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@ -988,8 +1047,9 @@ func (s *Server) Stop() {
s.mu.Unlock()
}
// GracefulStop stops the gRPC server gracefully. It stops the server to accept new
// connections and RPCs and blocks until all the pending RPCs are finished.
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
s.mu.Lock()
defer s.mu.Unlock()

View File

@ -45,19 +45,22 @@ type ConnTagInfo struct {
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// TODO add QOS related fields.
}
// RPCTagInfo defines the relevant information needed by RPC context tagger.
type RPCTagInfo struct {
// FullMethodName is the RPC method in the format of /package.service/method.
FullMethodName string
// FailFast indicates if this RPC is failfast.
// This field is only valid on the client side; it's always false on the server side.
FailFast bool
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
type Handler interface {
// TagRPC can attach some information to the given context.
// The returned context is used in the rest lifetime of the RPC.
// The context used for the remaining lifetime of the RPC will be derived from
// the returned context.
TagRPC(context.Context, *RPCTagInfo) context.Context
// HandleRPC processes the RPC stats.
HandleRPC(context.Context, RPCStats)

View File

@ -49,7 +49,7 @@ type RPCStats interface {
}
// Begin contains stats when an RPC begins.
// FailFast are only valid if Client is true.
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
@ -59,7 +59,7 @@ type Begin struct {
FailFast bool
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
@ -80,19 +80,19 @@ type InPayload struct {
RecvTime time.Time
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received.
// FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// The following fields are valid only if Client is false.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@ -103,7 +103,7 @@ type InHeader struct {
Compression string
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
@ -116,7 +116,7 @@ type InTrailer struct {
WireLength int
}
// IsClient indicates if this is from client side.
// IsClient indicates if the stats information is from client side.
func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
@ -137,19 +137,19 @@ type OutPayload struct {
SentTime time.Time
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent.
// FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
// The following fields are valid only if Client is true.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@ -160,7 +160,7 @@ type OutHeader struct {
Compression string
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
@ -173,7 +173,7 @@ type OutTrailer struct {
WireLength int
}
// IsClient indicates if this is from client side.
// IsClient indicates if this stats information is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
@ -184,7 +184,7 @@ type End struct {
Client bool
// EndTime is the time when the RPC ends.
EndTime time.Time
// Error is the error just happened. Its type is gRPC error.
// Error is the error just happened. It implements status.Status if non-nil.
Error error
}
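These stats types are delivered to a user-supplied stats.Handler. As a sketch, a minimal handler that only logs failed RPCs might look like the following; the connection-level TagConn/HandleConn methods are assumed from the full stats package rather than this hunk, and the handler name is made up.

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// loggingHandler logs the terminating error of each RPC and ignores
// everything else.
type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok && end.Error != nil {
		log.Printf("rpc finished with error: %v", end.Error)
	}
}

func (loggingHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	srv := grpc.NewServer(grpc.StatsHandler(loggingHandler{}))
	defer srv.Stop()
}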

vendor/google.golang.org/grpc/status/status.go (new file, 145 lines added, generated vendored)
View File

@ -0,0 +1,145 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package status implements errors returned by gRPC. These errors are
// serialized and transmitted on the wire between server and client, and allow
// for additional data to be transmitted via the Details field in the status
// proto. gRPC service handlers should return an error created by this
// package, and gRPC clients should expect a corresponding error to be
// returned from the RPC call.
//
// This package upholds the invariants that a non-nil error may not
// contain an OK code, and an OK code must result in a nil error.
package status
import (
"fmt"
"github.com/golang/protobuf/proto"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status
func (se *statusError) Error() string {
p := (*spb.Status)(se)
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
func (se *statusError) status() *Status {
return &Status{s: (*spb.Status)(se)}
}
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
s *spb.Status
}
// Code returns the status code contained in s.
func (s *Status) Code() codes.Code {
if s == nil || s.s == nil {
return codes.OK
}
return codes.Code(s.s.Code)
}
// Message returns the message contained in s.
func (s *Status) Message() string {
if s == nil || s.s == nil {
return ""
}
return s.s.Message
}
// Proto returns s's status as an spb.Status proto message.
func (s *Status) Proto() *spb.Status {
if s == nil {
return nil
}
return proto.Clone(s.s).(*spb.Status)
}
// Err returns an immutable error representing s; returns nil if s.Code() is
// OK.
func (s *Status) Err() error {
if s.Code() == codes.OK {
return nil
}
return (*statusError)(s.s)
}
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
}
// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
return New(c, fmt.Sprintf(format, a...))
}
// Error returns an error representing c and msg. If c is OK, returns nil.
func Error(c codes.Code, msg string) error {
return New(c, msg).Err()
}
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
return Error(c, fmt.Sprintf(format, a...))
}
// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
func ErrorProto(s *spb.Status) error {
return FromProto(s).Err()
}
// FromProto returns a Status representing s.
func FromProto(s *spb.Status) *Status {
return &Status{s: proto.Clone(s).(*spb.Status)}
}
// FromError returns a Status representing err if it was produced from this
// package, otherwise it returns nil, false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
if s, ok := err.(*statusError); ok {
return s.status(), true
}
return nil, false
}
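Since this package is newly vendored, a short usage sketch may be helpful; the findUser function and its error text are illustrative only.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// findUser stands in for a gRPC handler that reports failure via a status
// error; returning status.Error with codes.OK would collapse to nil.
func findUser(id string) error {
	return status.Errorf(codes.NotFound, "user %q not found", id)
}

func main() {
	err := findUser("42")
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound user "42" not found
	}
}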

View File

@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@ -112,17 +113,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
cancel context.CancelFunc
)
c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
}
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(&c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
@ -131,6 +139,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
}
var trInfo traceInfo
if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
@ -150,26 +161,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if err != nil && sh != nil {
// Only handle end stats if err != nil.
end := &stats.End{
Client: true,
Error: err,
defer func() {
if err != nil {
// Only handle end stats if err != nil.
end := &stats.End{
Client: true,
Error: err,
}
sh.HandleRPC(ctx, end)
}
sh.HandleRPC(ctx, end)
}
}()
}()
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
@ -177,7 +189,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
if _, ok := status.FromError(err); ok {
return nil, err
}
if err == errConnClosing || err == errConnUnavailable {
@ -192,14 +204,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
s, err = t.NewStream(ctx, callHdr)
if err != nil {
if _, ok := err.(transport.ConnectionError); ok && put != nil {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
if put != nil {
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return nil, toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return nil, toRPCErr(err)
@ -207,14 +222,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
break
}
cs := &clientStream{
opts: opts,
c: c,
desc: desc,
codec: cc.dopts.codec,
cp: cc.dopts.cp,
dc: cc.dopts.dc,
maxMsgSize: cc.dopts.maxMsgSize,
cancel: cancel,
opts: opts,
c: c,
desc: desc,
codec: cc.dopts.codec,
cp: cc.dopts.cp,
dc: cc.dopts.dc,
cancel: cancel,
put: put,
t: t,
@ -236,14 +250,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
select {
case <-t.Error():
// Incur transport error, simply exit.
case <-cc.ctx.Done():
cs.finish(ErrClientConnClosing)
cs.closeTransportStream(ErrClientConnClosing)
case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution.
if s.StatusCode() == codes.OK {
cs.finish(nil)
} else {
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
}
cs.finish(s.Status().Err())
cs.closeTransportStream(nil)
case <-s.GoAway():
cs.finish(errConnDrain)
@ -259,24 +272,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
opts []CallOption
c callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
cbuf *bytes.Buffer
dc Decompressor
maxMsgSize int
cancel context.CancelFunc
opts []CallOption
c callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
cbuf *bytes.Buffer
dc Decompressor
cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
mu sync.Mutex
put func()
closed bool
mu sync.Mutex
put func()
closed bool
finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called.
trInfo traceInfo
@ -351,7 +364,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
}()
if err != nil {
return Errorf(codes.Internal, "grpc: %v", err)
return err
}
if cs.c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(out) > *cs.c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize)
}
err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil {
@ -362,28 +381,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
defer func() {
if err != nil && cs.statsHandler != nil {
// Only generate End if err != nil.
// If err == nil, it's not the last RecvMsg.
// The last RecvMsg gets either an RPC error or io.EOF.
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
}()
var inPayload *stats.InPayload
if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload)
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
@ -406,17 +413,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
// Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload.
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil)
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
if cs.s.StatusCode() == codes.OK {
cs.finish(err)
return nil
if se := cs.s.Status().Err(); se != nil {
return se
}
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
cs.finish(err)
return nil
}
return toRPCErr(err)
}
@ -424,11 +434,11 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
cs.closeTransportStream(err)
}
if err == io.EOF {
if cs.s.StatusCode() == codes.OK {
// Returns io.EOF to indicate the end of the stream.
return
if statusErr := cs.s.Status().Err(); statusErr != nil {
return statusErr
}
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
// Returns io.EOF to indicate the end of the stream.
return
}
return toRPCErr(err)
}
@ -462,20 +472,39 @@ func (cs *clientStream) closeTransportStream(err error) {
}
func (cs *clientStream) finish(err error) {
cs.mu.Lock()
defer cs.mu.Unlock()
if cs.finished {
return
}
cs.finished = true
defer func() {
if cs.cancel != nil {
cs.cancel()
}
}()
cs.mu.Lock()
defer cs.mu.Unlock()
for _, o := range cs.opts {
o.after(&cs.c)
}
if cs.put != nil {
updateRPCInfoInContext(cs.s.Context(), rpcInfo{
bytesSent: cs.s.BytesSent(),
bytesReceived: cs.s.BytesReceived(),
})
cs.put()
cs.put = nil
}
if cs.statsHandler != nil {
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
// end.Error is nil if the RPC finished successfully.
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing {
return
}
@ -512,17 +541,16 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
cbuf *bytes.Buffer
maxMsgSize int
statusCode codes.Code
statusDesc string
trInfo *traceInfo
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
cbuf *bytes.Buffer
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
statsHandler stats.Handler
@ -578,9 +606,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
}()
if err != nil {
err = Errorf(codes.Internal, "grpc: %v", err)
return err
}
if len(out) > ss.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
@ -610,7 +640,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
return err
}

View File

@ -46,12 +46,20 @@ const (
// The default value of flow control window size in HTTP2 spec.
defaultWindowSize = 65535
// The initial window size for flow control.
initialWindowSize = defaultWindowSize // for an RPC
initialConnWindowSize = defaultWindowSize * 16 // for a connection
infinity = time.Duration(math.MaxInt64)
defaultKeepaliveTime = infinity
defaultKeepaliveTimeout = time.Duration(20 * time.Second)
defaultMaxStreamsClient = 100
initialWindowSize = defaultWindowSize // for an RPC
initialConnWindowSize = defaultWindowSize * 16 // for a connection
infinity = time.Duration(math.MaxInt64)
defaultClientKeepaliveTime = infinity
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
defaultMaxStreamsClient = 100
defaultMaxConnectionIdle = infinity
defaultMaxConnectionAge = infinity
defaultMaxConnectionAgeGrace = infinity
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
// max window limit set by HTTP2 Specs.
maxWindowSize = math.MaxInt32
)
// The following defines various control items which could flow through
@ -60,6 +68,7 @@ const (
type windowUpdate struct {
streamID uint32
increment uint32
flush bool
}
func (*windowUpdate) item() {}
@ -79,6 +88,8 @@ type resetStream struct {
func (*resetStream) item() {}
type goAway struct {
code http2.ErrCode
debugData []byte
}
func (*goAway) item() {}
@ -159,6 +170,40 @@ type inFlow struct {
// The amount of data the application has consumed but grpc has not sent
// window update for them. Used to reduce window update frequency.
pendingUpdate uint32
// delta is the extra window update given by receiver when an application
// is reading data bigger in size than the inFlow limit.
delta uint32
}
func (f *inFlow) maybeAdjust(n uint32) uint32 {
if n > uint32(math.MaxInt32) {
n = uint32(math.MaxInt32)
}
f.mu.Lock()
defer f.mu.Unlock()
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
// can send without a window update.
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
// estUntransmittedData is the maximum number of bytes the sender might not have put
// on the wire yet. A value of 0 or less means that we have already received all or
// more bytes than the application is requesting to read.
estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
// This implies that unless we send a window update, the sender won't be able to send all the bytes
// for this message. Therefore we must send an update over the limit since there's an active read
// request from the application.
if estUntransmittedData > estSenderQuota {
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
if f.limit+n > maxWindowSize {
f.delta = maxWindowSize - f.limit
} else {
// Send a window update for the whole message and not just the difference between
// estUntransmittedData and estSenderQuota. This will be helpful in case the message
// is padded; we will fall back on the currently available window (at least 1/4th of the limit).
f.delta = n
}
return f.delta
}
return 0
}
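A worked numeric sketch of maybeAdjust under assumed values: with limit = 65535, pendingData = 16384, pendingUpdate = 0 and a pending application read of n = 1 MiB, estSenderQuota is 49151 while estUntransmittedData is 1032192, so the sender would stall mid-message and an extra update of delta = n is returned (n is well below maxWindowSize here). The same arithmetic as a standalone program:

package main

import "fmt"

func main() {
	// Assumed inFlow state for one stream (arbitrary example values).
	var (
		limit         uint32 = 65535   // current window limit
		pendingData   uint32 = 16384   // received by the transport, unread by the app
		pendingUpdate uint32 = 0       // read by the app, window update not yet sent
		n             uint32 = 1 << 20 // size the application is about to read
	)
	estSenderQuota := int32(limit - (pendingData + pendingUpdate))
	estUntransmittedData := int32(n - pendingData)
	fmt.Println(estSenderQuota, estUntransmittedData) // 49151 1032192
	if estUntransmittedData > estSenderQuota {
		// The sender would stall mid-message, so maybeAdjust returns an
		// extra update of delta = n (capped at maxWindowSize - limit).
		fmt.Println("delta =", n) // delta = 1048576
	}
}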
// onData is invoked when some data frame is received. It updates pendingData.
@ -166,7 +211,7 @@ func (f *inFlow) onData(n uint32) error {
f.mu.Lock()
defer f.mu.Unlock()
f.pendingData += n
if f.pendingData+f.pendingUpdate > f.limit {
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
}
return nil
@ -181,6 +226,13 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
f.pendingData -= n
if n > f.delta {
n -= f.delta
f.delta = 0
} else {
f.delta -= n
n = 0
}
f.pendingUpdate += n
if f.pendingUpdate >= f.limit/4 {
wu := f.pendingUpdate
@ -190,10 +242,10 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
func (f *inFlow) resetPendingData() uint32 {
func (f *inFlow) resetPendingUpdate() uint32 {
f.mu.Lock()
defer f.mu.Unlock()
n := f.pendingData
f.pendingData = 0
n := f.pendingUpdate
f.pendingUpdate = 0
return n
}

View File

@ -37,6 +37,8 @@ package transport
import (
"net"
"google.golang.org/grpc/codes"
"golang.org/x/net/context"
)
@ -44,3 +46,14 @@ import (
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

View File

@ -35,12 +35,26 @@
package transport
import (
"context"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, netctx.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
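The conversion is small enough to verify by hand; as an illustration, a hypothetical helper that extracts only the resulting code from the same mapping:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
)

// codeForContextErr mirrors the switch in ContextErr but returns just the
// gRPC code; it is a standalone illustration, not part of the transport package.
func codeForContextErr(err error) codes.Code {
	switch err {
	case context.DeadlineExceeded:
		return codes.DeadlineExceeded
	case context.Canceled:
		return codes.Canceled
	}
	return codes.Internal
}

func main() {
	fmt.Println(codeForContextErr(context.DeadlineExceeded)) // DeadlineExceeded
	fmt.Println(codeForContextErr(context.Canceled))         // Canceled
}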

View File

@ -53,6 +53,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
@ -101,14 +102,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
continue
}
for _, v := range vv {
if k == "user-agent" {
// user-agent is special. Copying logic of http_util.go.
if i := strings.LastIndex(v, " "); i == -1 {
// There is no application user agent string being set
continue
} else {
v = v[:i]
}
v, err := decodeMetadataHeader(k, v)
if err != nil {
return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
}
metakv = append(metakv, k, v)
}
@ -174,15 +170,22 @@ func (a strAddr) String() string { return string(a) }
// do runs fn in the ServeHTTP goroutine.
func (ht *serverHandlerTransport) do(fn func()) error {
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
select {
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
default:
select {
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
}
}
}
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
err := ht.do(func() {
ht.writeCommonHeaders(s)
@ -192,10 +195,13 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
ht.rw.(http.Flusher).Flush()
h := ht.rw.Header()
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
if statusDesc != "" {
h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
if m := st.Message(); m != "" {
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
// TODO: Support Grpc-Status-Details-Bin
if md := s.Trailer(); len(md) > 0 {
for k, vv := range md {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
@ -203,10 +209,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
continue
}
for _, v := range vv {
// http2 ResponseWriter mechanism to
// send undeclared Trailers after the
// headers have possibly been written.
h.Add(http2.TrailerPrefix+k, v)
// http2 ResponseWriter mechanism to send undeclared Trailers after
// the headers have possibly been written.
h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
}
}
}
@ -234,6 +239,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
h.Add("Trailer", "Grpc-Status")
h.Add("Trailer", "Grpc-Message")
// TODO: Support Grpc-Status-Details-Bin
if s.sendCompress != "" {
h.Set("Grpc-Encoding", s.sendCompress)
@ -260,6 +266,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
continue
}
for _, v := range vv {
v = encodeMetadataHeader(k, v)
h.Add(k, v)
}
}
@ -300,13 +307,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
req := ht.req
s := &Stream{
id: 0, // irrelevant
windowHandler: func(int) {}, // nothing
cancel: cancel,
buf: newRecvBuffer(),
st: ht,
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
id: 0, // irrelevant
requestRead: func(int) {},
cancel: cancel,
buf: newRecvBuffer(),
st: ht,
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
}
pr := &peer.Peer{
Addr: ht.RemoteAddr(),
@ -314,10 +321,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
if req.TLS != nil {
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
}
ctx = metadata.NewContext(ctx, ht.headerMD)
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
ctx = peer.NewContext(ctx, pr)
s.ctx = newContextWithStream(ctx, s)
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
windowHandler: func(int) {},
}
// readerDone is closed when the Body.Read-ing goroutine exits.
readerDone := make(chan struct{})

View File

@ -35,7 +35,6 @@ package transport
import (
"bytes"
"fmt"
"io"
"math"
"net"
@ -54,6 +53,7 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
// http2Client implements the ClientTransport interface with HTTP2.
@ -101,6 +101,8 @@ type http2Client struct {
// The scheme used: https if TLS is on, http otherwise.
scheme string
isSecure bool
creds []credentials.PerRPCCredentials
// Boolean to keep track of reading activity on transport.
@ -110,6 +112,8 @@ type http2Client struct {
statsHandler stats.Handler
initialWindowSize int32
mu sync.Mutex // guard the following variables
state transportState // the state of underlying connection
activeStreams map[uint32]*Stream
@ -121,6 +125,9 @@ type http2Client struct {
goAwayID uint32
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@ -166,9 +173,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
conn, err := dial(ctx, opts.Dialer, addr.Addr)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
@ -176,7 +183,10 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
conn.Close()
}
}(conn)
var authInfo credentials.AuthInfo
var (
isSecure bool
authInfo credentials.AuthInfo
)
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
@ -184,16 +194,21 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
// Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates.
temp := isTemporary(err)
return nil, connectionErrorf(temp, err, "transport: %v", err)
return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
}
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
kp.Time = defaultKeepaliveTime
kp.Time = defaultClientKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultKeepaliveTimeout
kp.Timeout = defaultClientKeepaliveTimeout
}
icwz := int32(initialConnWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
icwz = opts.InitialConnWindowSize
}
var buf bytes.Buffer
t := &http2Client{
@ -206,27 +221,32 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
localAddr: conn.LocalAddr(),
authInfo: authInfo,
// The client initiated stream id is odd starting from 1.
nextID: 1,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}),
goAway: make(chan struct{}),
awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn),
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
creds: opts.PerRPCCredentials,
maxStreams: defaultMaxStreamsClient,
streamsQuota: newQuotaPool(defaultMaxStreamsClient),
streamSendQuota: defaultWindowSize,
kp: kp,
statsHandler: opts.StatsHandler,
nextID: 1,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}),
goAway: make(chan struct{}),
awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn),
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
isSecure: isSecure,
creds: opts.PerRPCCredentials,
maxStreams: defaultMaxStreamsClient,
streamsQuota: newQuotaPool(defaultMaxStreamsClient),
streamSendQuota: defaultWindowSize,
kp: kp,
statsHandler: opts.StatsHandler,
initialWindowSize: initialWindowSize,
}
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
}
// Make sure awakenKeepalive can't be written upon.
// keepalive routine will make it writable, if need be.
@ -249,29 +269,29 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
n, err := t.conn.Write(clientPreface)
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
}
if n != len(clientPreface) {
t.Close()
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
}
if initialWindowSize != defaultWindowSize {
if t.initialWindowSize != defaultWindowSize {
err = t.framer.writeSettings(true, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize),
Val: uint32(t.initialWindowSize),
})
} else {
err = t.framer.writeSettings(true)
}
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
}
}
go t.controller()
@ -291,27 +311,33 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
fc: &inFlow{limit: initialWindowSize},
fc: &inFlow{limit: uint32(t.initialWindowSize)},
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
headerChan: make(chan struct{}),
}
t.nextID += 2
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
s.dec = &recvBufferReader{
ctx: s.ctx,
goAway: s.goAway,
recv: s.buf,
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
goAway: s.goAway,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
return s
}
// NewStream creates a stream and register it into the transport as "active"
// NewStream creates a stream and registers it into the transport as "active"
// streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{
@ -321,10 +347,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
userCtx := ctx
ctx = peer.NewContext(ctx, pr)
authData := make(map[string]string)
for _, c := range t.creds {
var (
authData = make(map[string]string)
audience string
)
// Create an audience string only if needed.
if len(t.creds) > 0 || callHdr.Creds != nil {
// Construct URI required to get auth request metadata.
var port string
if pos := strings.LastIndex(t.target, ":"); pos != -1 {
@ -335,17 +364,39 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
pos := strings.LastIndex(callHdr.Method, "/")
if pos == -1 {
return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
pos = len(callHdr.Method)
}
audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
audience = "https://" + callHdr.Host + port + callHdr.Method[:pos]
}
for _, c := range t.creds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err)
return nil, streamErrorf(codes.Internal, "transport: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2.
k = strings.ToLower(k)
authData[k] = v
}
}
callAuthData := make(map[string]string)
// Check if credentials.PerRPCCredentials were provided via call options.
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
if callCreds := callHdr.Creds; callCreds != nil {
if !t.isSecure && callCreds.RequireTransportSecurity() {
			return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
return nil, streamErrorf(codes.Internal, "transport: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2
k = strings.ToLower(k)
callAuthData[k] = v
}
}
t.mu.Lock()
if t.activeStreams == nil {
t.mu.Unlock()
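
Illustrative sketch (not part of the upstream diff): how the two credential sources merged above are typically supplied from application code, assuming the standard grpc.WithPerRPCCredentials dial option and the grpc.PerRPCCredentials call option; the OAuth token value is a placeholder.

import (
	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

// dialWithPerRPCCreds attaches connection-wide per-RPC credentials (t.creds above);
// a second set can be attached to an individual call via the grpc.PerRPCCredentials
// call option (callHdr.Creds above), and metadata from both sets is applied.
func dialWithPerRPCCreds(target string) (*grpc.ClientConn, error) {
	rpcCreds := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "placeholder-token"})
	return grpc.Dial(target,
		// Credentials that require transport security must be paired with TLS,
		// otherwise NewStream fails with codes.Unauthenticated, as checked above.
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
		grpc.WithPerRPCCredentials(rpcCreds),
	)
}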
@ -388,7 +439,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, ErrConnClosing
}
s := t.newStream(ctx, callHdr)
s.clientStatsCtx = userCtx
t.activeStreams[s.id] = s
	// If the number of active streams changes from 0 to 1, then check if keepalive
// has gone dormant. If so, wake it up.
@ -424,33 +474,34 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
for k, v := range authData {
// Capital header names are illegal in HTTP/2.
k = strings.ToLower(k)
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
for k, v := range callAuthData {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
var (
hasMD bool
endHeaders bool
)
if md, ok := metadata.FromContext(ctx); ok {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
hasMD = true
for k, v := range md {
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
for k, v := range *md {
for k, vv := range *md {
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
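
Illustrative sketch (not part of the upstream diff): how application metadata reaches the metadata.FromOutgoingContext call above. The keys "x-request-id" and "x-auth-token-bin" are hypothetical; keys with the "-bin" suffix may carry arbitrary bytes, which encodeMetadataHeader base64-encodes on the wire.

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

// withCallMetadata returns a context whose metadata is picked up by
// metadata.FromOutgoingContext in NewStream and written as HTTP/2 header fields.
func withCallMetadata(ctx context.Context, token []byte) context.Context {
	md := metadata.Pairs(
		"x-request-id", "42",              // plain ASCII value
		"x-auth-token-bin", string(token), // "-bin" suffix: base64-encoded on the wire
	)
	return metadata.NewOutgoingContext(ctx, md)
}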
@ -490,6 +541,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
s.mu.Lock()
s.bytesSent = true
s.mu.Unlock()
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
@ -499,7 +554,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
}
t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader)
t.statsHandler.HandleRPC(s.ctx, outHeader)
}
t.writableChan <- 0
return s, nil
@ -513,6 +568,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
t.mu.Unlock()
return
}
if err != nil {
// notify in-flight streams, before the deletion
s.write(recvMsg{err: err})
}
delete(t.activeStreams, s.id)
if t.state == draining && len(t.activeStreams) == 0 {
// The transport is draining and s is the last live stream on t.
@ -542,11 +601,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
s.mu.Lock()
rstStream = s.rstStream
rstError = s.rstError
if q := s.fc.resetPendingData(); q > 0 {
if n := t.fc.onRead(q); n > 0 {
t.controlBuf.put(&windowUpdate{0, n})
}
}
if s.state == streamDone {
s.mu.Unlock()
return
@ -755,6 +809,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
return s, ok
}
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
s.mu.Lock()
defer s.mu.Unlock()
if s.state == streamDone {
return
}
if w := s.fc.maybeAdjust(n); w > 0 {
		// Piggyback the connection's window update along.
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@ -764,11 +836,11 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
if w := t.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
@ -778,43 +850,39 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.notifyError(connectionErrorf(true, err, "%v", err))
return
}
	// Decouple the connection's flow control from the application's read.
	// An update on the connection's flow control should not depend on
	// whether the user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyway.
	// Decoupling the connection flow control will prevent other
	// active (fast) streams from starving in the presence of slow or
	// inactive streams.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
// The stream has been closed. Release the corresponding quota.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if err := s.fc.onData(uint32(size)); err != nil {
s.state = streamDone
s.statusCode = codes.Internal
s.statusDesc = err.Error()
s.rstStream = true
s.rstError = http2.ErrCodeFlowControl
close(s.done)
s.finish(status.New(codes.Internal, err.Error()))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
s.mu.Unlock()
@ -835,10 +903,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
s.mu.Unlock()
return
}
s.state = streamDone
s.statusCode = codes.Internal
s.statusDesc = "server closed the stream without sending trailers"
close(s.done)
s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -854,18 +919,16 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
s.mu.Unlock()
return
}
s.state = streamDone
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
if !ok {
grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
s.statusCode = codes.Unknown
statusCode = codes.Unknown
}
s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode)
close(s.done)
s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -893,6 +956,9 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
}
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
t.mu.Lock()
if t.state == reachable || t.state == draining {
if f.LastStreamID > 0 && f.LastStreamID%2 != 1 {
@ -914,6 +980,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
return
default:
t.setGoAwayReason(f)
}
t.goAwayID = f.LastStreamID
close(t.goAway)
@ -921,6 +988,26 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
}
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on the transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = NoReason
switch f.ErrCode {
case http2.ErrCodeEnhanceYourCalm:
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = TooManyPings
}
}
}
func (t *http2Client) GetGoAwayReason() GoAwayReason {
t.mu.Lock()
defer t.mu.Unlock()
return t.goAwayReason
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
id := f.Header().StreamID
incr := f.Increment
@ -939,18 +1026,18 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok {
return
}
s.mu.Lock()
s.bytesReceived = true
s.mu.Unlock()
var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if state.err != nil {
if err := state.decodeResponseHeader(frame); err != nil {
s.mu.Lock()
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.mu.Unlock()
s.write(recvMsg{err: state.err})
s.write(recvMsg{err: err})
// Something wrong. Stops reading even when there is remaining.
return
}
@ -964,13 +1051,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader)
t.statsHandler.HandleRPC(s.ctx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
}
t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer)
t.statsHandler.HandleRPC(s.ctx, inTrailer)
}
}
}()
@ -995,10 +1082,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if len(state.mdata) > 0 {
s.trailer = state.mdata
}
s.statusCode = state.statusCode
s.statusDesc = state.statusDesc
close(s.done)
s.state = streamDone
s.finish(state.status())
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@ -1097,7 +1181,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
t.mu.Lock()
for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream.
stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
@ -1116,7 +1200,7 @@ func (t *http2Client) controller() {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)

View File

@ -38,19 +38,25 @@ import (
"errors"
"io"
"math"
"math/rand"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
@ -90,11 +96,35 @@ type http2Server struct {
stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
lastPingAt time.Time
// Number of times the client has violated keepalive ping policy so far.
pingStrikes uint8
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
resetPingStrikes uint32 // Accessed atomically.
initialWindowSize int32
mu sync.Mutex // guard the following
state transportState
activeStreams map[uint32]*Stream
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
// idle is the time instant when the connection went idle.
	// This is either the beginning of the connection or when the number of
	// RPCs goes down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -114,41 +144,75 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
Val: maxStreams,
})
}
if initialWindowSize != defaultWindowSize {
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
}
icwz := int32(initialConnWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
}
if iwz != defaultWindowSize {
settings = append(settings, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize)})
Val: uint32(iwz)})
}
if err := framer.writeSettings(true, settings...); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
kp := config.KeepaliveParams
if kp.MaxConnectionIdle == 0 {
kp.MaxConnectionIdle = defaultMaxConnectionIdle
}
if kp.MaxConnectionAge == 0 {
kp.MaxConnectionAge = defaultMaxConnectionAge
}
// Add a jitter to MaxConnectionAge.
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
if kp.MaxConnectionAgeGrace == 0 {
kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
}
if kp.Time == 0 {
kp.Time = defaultServerKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
var buf bytes.Buffer
t := &http2Server{
ctx: context.Background(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
ctx: context.Background(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
@ -159,6 +223,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.stats.HandleConn(t.ctx, connBegin)
}
go t.controller()
go t.keepalive()
t.writableChan <- 0
return t, nil
}
@ -170,18 +235,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: &inFlow{limit: initialWindowSize},
fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if err := state.err; err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
if err := state.processHeaderField(hf); err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
}
return
}
return
}
if frame.StreamEnded() {
@ -208,12 +272,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, state.mdata)
s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
}
s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
s.recvCompress = state.encoding
s.method = state.method
@ -224,7 +292,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
// TODO: Log the real error.
grpclog.Printf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
@ -248,9 +316,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
t.maxStreamID = s.id
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock()
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
@ -275,7 +346,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
		// Only log if it isn't a simple TCP accept check (i.e., a TCP balancer doing an open/close socket check)
if err != io.EOF {
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
t.Close()
return
}
@ -291,10 +365,11 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
return
}
if err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
grpclog.Printf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
t.Close()
return
}
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@ -305,6 +380,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
for {
frame, err := t.framer.readFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
t.mu.Lock()
@ -363,6 +439,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
return s, true
}
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
s.mu.Lock()
defer s.mu.Unlock()
if s.state == streamDone {
return
}
if w := s.fc.maybeAdjust(n); w > 0 {
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@ -372,11 +465,11 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
if w := t.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
@ -387,27 +480,26 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.Close()
return
}
	// Decouple the connection's flow control from the application's read.
	// An update on the connection's flow control should not depend on
	// whether the user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyway.
	// Decoupling the connection flow control will prevent other
	// active (fast) streams from starving in the presence of slow or
	// inactive streams.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if size > 0 {
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
// The stream has been closed. Release the corresponding quota.
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
return
}
if err := s.fc.onData(uint32(size)); err != nil {
@ -418,7 +510,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w})
t.controlBuf.put(&windowUpdate{s.id, w, true})
}
}
s.mu.Unlock()
@ -463,6 +555,11 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
t.controlBuf.put(&settings{ack: true, ss: ss})
}
const (
maxPingStrikes = 2
defaultPingTimeout = 2 * time.Hour
)
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() { // Do nothing.
return
@ -470,6 +567,38 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck)
now := time.Now()
defer func() {
t.lastPingAt = now
}()
	// A reset of ping strikes means that we don't need to check for a policy
	// violation for this ping, and the pingStrikes counter should be set
	// to 0.
if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
t.pingStrikes = 0
return
}
t.mu.Lock()
ns := len(t.activeStreams)
t.mu.Unlock()
if ns < 1 && !t.kep.PermitWithoutStream {
		// Keepalive shouldn't be active; thus, this new ping should
		// have come after at least defaultPingTimeout.
if t.lastPingAt.Add(defaultPingTimeout).After(now) {
t.pingStrikes++
}
} else {
// Check if keepalive policy is respected.
if t.lastPingAt.Add(t.kep.MinTime).After(now) {
t.pingStrikes++
}
}
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")})
}
}
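
Illustrative sketch (not part of the upstream diff): client-side keepalive settings that stay within the enforcement policy checked above, assuming the standard grpc.WithKeepaliveParams dial option. A client that pings more often than the server's MinTime, or pings without active streams when PermitWithoutStream is false, accumulates ping strikes and is eventually sent a GOAWAY with "too_many_pings". Values are placeholders.

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// dialWithKeepalive keeps client pings conservative so they never exceed
// maxPingStrikes on the server side.
func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                2 * time.Minute,  // ping after 2m of inactivity; keep this >= the server's MinTime
			Timeout:             20 * time.Second, // close the connection if the ping is not acked in time
			PermitWithoutStream: false,            // only ping while RPCs are in flight
		}),
	)
}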
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@ -488,6 +617,13 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e
first := true
endHeaders := false
var err error
defer func() {
if err == nil {
			// Reset ping strikes when sending headers since that might cause the
			// peer to send a ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
// Sends the headers in a single batch.
for !endHeaders {
size := t.hBuf.Len()
@ -542,13 +678,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
for k, v := range md {
for k, vv := range md {
if isReservedHeader(k) {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
@ -569,7 +705,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
var headersSent, hasHeader bool
s.mu.Lock()
if s.state == streamDone {
@ -600,17 +736,28 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
Value: strconv.Itoa(int(statusCode)),
Value: strconv.Itoa(int(st.Code())),
})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
// Attach the trailer metadata.
for k, v := range s.trailer {
for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
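
Illustrative sketch (not part of the upstream diff): producing the kind of *status.Status whose proto, including its details, is marshaled into the grpc-status-details-bin trailer above. Assumes the status package's WithDetails helper and the errdetails proto messages from genproto; the error text is a placeholder.

import (
	errdetails "google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// failWithDetails returns an error carrying structured detail messages; on the
// client, status.FromError(err) recovers the *status.Status and Details()
// returns the decoded messages parsed back out of grpc-status-details-bin.
func failWithDetails() error {
	st := status.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&errdetails.DebugInfo{Detail: "name must not be empty"})
	if err != nil {
		return status.Error(codes.Internal, "attaching status details failed")
	}
	return st.Err()
}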
@ -631,7 +778,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) {
// TODO(zhaoq): Support multi-writers for a single stream.
var writeHeaderFrame bool
s.mu.Lock()
@ -646,6 +793,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
if writeHeaderFrame {
t.WriteHeader(s, nil)
}
defer func() {
if err == nil {
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
r := bytes.NewBuffer(data)
for {
if r.Len() == 0 {
@ -727,7 +881,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
}
@ -735,6 +889,91 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
}
}
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
var pingSent bool
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
keepalive := time.NewTimer(t.kp.Time)
// NOTE: All exit paths of this function should reset their
	// respective timers. A failure to do so will cause the
// following clean-up to deadlock and eventually leak.
defer func() {
if !maxIdle.Stop() {
<-maxIdle.C
}
if !maxAge.Stop() {
<-maxAge.C
}
if !keepalive.Stop() {
<-keepalive.C
}
}()
for {
select {
case <-maxIdle.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
maxIdle.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.state = draining
t.mu.Unlock()
t.Drain()
				// Resetting the timer so that the clean-up doesn't deadlock.
maxIdle.Reset(infinity)
return
}
t.mu.Unlock()
maxIdle.Reset(val)
case <-maxAge.C:
t.mu.Lock()
t.state = draining
t.mu.Unlock()
t.Drain()
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-maxAge.C:
// Close the connection after grace period.
t.Close()
				// Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
case <-t.shutdownChan:
}
return
case <-keepalive.C:
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
pingSent = false
keepalive.Reset(t.kp.Time)
continue
}
if pingSent {
t.Close()
				// Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity)
return
}
pingSent = true
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
case <-t.shutdownChan:
return
}
}
}
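
Illustrative sketch (not part of the upstream diff): how the parameters driving this keepalive goroutine are typically supplied, assuming the standard grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options. Values are placeholders; unset fields fall back to the defaults applied in newHTTP2Server.

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// newServerWithKeepalive wires the ServerParameters consumed by the keepalive
// goroutine above and the EnforcementPolicy consumed by handlePing.
func newServerWithKeepalive() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     5 * time.Minute,  // gracefully drain connections idle this long
			MaxConnectionAge:      30 * time.Minute, // gracefully drain any connection after this age (+/- 10% jitter)
			MaxConnectionAgeGrace: time.Minute,      // then force-close after this grace period
			Time:                  2 * time.Minute,  // ping an inactive client after this long
			Timeout:               20 * time.Second, // close it if the ping is not answered in time
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute, // clients pinging more often accrue ping strikes
			PermitWithoutStream: false,
		}),
	)
}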
// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Server) controller() {
@ -746,7 +985,7 @@ func (t *http2Server) controller() {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
@ -766,7 +1005,10 @@ func (t *http2Server) controller() {
sid := t.maxStreamID
t.state = draining
t.mu.Unlock()
t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
t.framer.writeGoAway(true, sid, i.code, i.debugData)
if i.code == http2.ErrCodeEnhanceYourCalm {
t.Close()
}
case *flushIO:
t.framer.flushWrite()
case *ping:
@ -816,6 +1058,9 @@ func (t *http2Server) Close() (err error) {
func (t *http2Server) closeStream(s *Stream) {
t.mu.Lock()
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
if t.state == draining && len(t.activeStreams) == 0 {
defer t.Close()
}
@ -825,11 +1070,6 @@ func (t *http2Server) closeStream(s *Stream) {
// called to interrupt the potential blocking on other goroutines.
s.cancel()
s.mu.Lock()
if q := s.fc.resetPendingData(); q > 0 {
if w := t.fc.onRead(q); w > 0 {
t.controlBuf.put(&windowUpdate{0, w})
}
}
if s.state == streamDone {
s.mu.Unlock()
return
@ -843,5 +1083,17 @@ func (t *http2Server) RemoteAddr() net.Addr {
}
func (t *http2Server) Drain() {
t.controlBuf.put(&goAway{})
t.controlBuf.put(&goAway{code: http2.ErrCodeNo})
}
var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
j := rgen.Int63n(2*r) - r
return time.Duration(j)
}

View File

@ -36,19 +36,23 @@ package transport
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
@ -85,18 +89,39 @@ var (
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
httpStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
http.StatusBadRequest: codes.Internal,
// 401 Unauthorized - UNAUTHENTICATED.
http.StatusUnauthorized: codes.Unauthenticated,
// 403 Forbidden - PERMISSION_DENIED.
http.StatusForbidden: codes.PermissionDenied,
// 404 Not Found - UNIMPLEMENTED.
http.StatusNotFound: codes.Unimplemented,
// 429 Too Many Requests - UNAVAILABLE.
http.StatusTooManyRequests: codes.Unavailable,
// 502 Bad Gateway - UNAVAILABLE.
http.StatusBadGateway: codes.Unavailable,
// 503 Service Unavailable - UNAVAILABLE.
http.StatusServiceUnavailable: codes.Unavailable,
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
)
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire header block is finished.
type decodeState struct {
err error // first error encountered decoding
encoding string
// statusCode caches the stream status received from the trailer
// the server sent. Client side only.
statusCode codes.Code
statusDesc string
// statusGen caches the stream status received from the trailer the server
// sent. Client side only. Do not access directly. After all trailers are
// parsed, use the status method to retrieve the status.
statusGen *status.Status
// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
// intended for direct access outside of parsing.
rawStatusCode *int
rawStatusMsg string
httpStatus *int
// Server side only fields.
timeoutSet bool
timeout time.Duration
@ -119,6 +144,7 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
"grpc-status-details-bin",
"te":
return true
default:
@ -137,12 +163,6 @@ func isWhitelistedPseudoHeader(hdr string) bool {
}
}
func (d *decodeState) setErr(err error) {
if d.err == nil {
d.err = err
}
}
func validContentType(t string) bool {
e := "application/grpc"
if !strings.HasPrefix(t, e) {
@ -156,56 +176,135 @@ func validContentType(t string) bool {
return true
}
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
func (d *decodeState) status() *status.Status {
if d.statusGen == nil {
// No status-details were provided; generate status using code/msg.
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
}
return d.statusGen
}
const binHdrSuffix = "-bin"
func encodeBinHeader(v []byte) string {
return base64.RawStdEncoding.EncodeToString(v)
}
func decodeBinHeader(v string) ([]byte, error) {
if len(v)%4 == 0 {
// Input was padded, or padding was not necessary.
return base64.StdEncoding.DecodeString(v)
}
return base64.RawStdEncoding.DecodeString(v)
}
func encodeMetadataHeader(k, v string) string {
if strings.HasSuffix(k, binHdrSuffix) {
return encodeBinHeader(([]byte)(v))
}
return v
}
func decodeMetadataHeader(k, v string) (string, error) {
if strings.HasSuffix(k, binHdrSuffix) {
b, err := decodeBinHeader(v)
return string(b), err
}
return v, nil
}
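
Standalone sketch (not part of the upstream diff) of the "-bin" convention implemented above: values for keys with the "-bin" suffix travel as unpadded standard base64, and the decoder also accepts padded input (len(v)%4 == 0).

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := []byte{0x00, 0xde, 0xad, 0xbe, 0xef}
	wire := base64.RawStdEncoding.EncodeToString(raw) // what encodeBinHeader emits
	fmt.Println(wire)                                 // AN6tvu8

	// Both padded and unpadded forms decode to the same bytes, mirroring decodeBinHeader.
	unpadded, _ := base64.RawStdEncoding.DecodeString("AN6tvu8")
	padded, _ := base64.StdEncoding.DecodeString("AN6tvu8=")
	fmt.Println(string(unpadded) == string(padded)) // true
}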
func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
for _, hf := range frame.Fields {
if err := d.processHeaderField(hf); err != nil {
return err
}
}
// If grpc status exists, no need to check further.
if d.rawStatusCode != nil || d.statusGen != nil {
return nil
}
// If grpc status doesn't exist and http status doesn't exist,
// then it's a malformed header.
if d.httpStatus == nil {
return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
}
if *(d.httpStatus) != http.StatusOK {
code, ok := httpStatusConvTab[*(d.httpStatus)]
if !ok {
code = codes.Unknown
}
return streamErrorf(code, http.StatusText(*(d.httpStatus)))
}
// gRPC status doesn't exist and http status is OK.
// Set rawStatusCode to be unknown and return nil error.
	// So that, if the stream has ended, this Unknown status
	// will be propagated to the user.
	// Otherwise, it will be ignored, in which case the status from
	// a later trailer, the one that has the StreamEnded flag set, is propagated.
code := int(codes.Unknown)
d.rawStatusCode = &code
return nil
}
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
switch f.Name {
case "content-type":
if !validContentType(f.Value) {
d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
return
return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
}
case "grpc-encoding":
d.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
return
return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
}
d.statusCode = codes.Code(code)
d.rawStatusCode = &code
case "grpc-message":
d.statusDesc = decodeGrpcMessage(f.Value)
d.rawStatusMsg = decodeGrpcMessage(f.Value)
case "grpc-status-details-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
s := &spb.Status{}
if err := proto.Unmarshal(v, s); err != nil {
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
d.statusGen = status.FromProto(s)
case "grpc-timeout":
d.timeoutSet = true
var err error
d.timeout, err = decodeTimeout(f.Value)
if err != nil {
d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
return
if d.timeout, err = decodeTimeout(f.Value); err != nil {
return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
}
case ":path":
d.method = f.Value
case ":status":
code, err := strconv.Atoi(f.Value)
if err != nil {
return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
}
d.httpStatus = &code
default:
if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
if f.Name == "user-agent" {
i := strings.LastIndex(f.Value, " ")
if i == -1 {
// There is no application user agent string being set.
return
}
// Extract the application user agent string.
f.Value = f.Value[:i]
}
if d.mdata == nil {
d.mdata = make(map[string][]string)
}
k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
v, err := decodeMetadataHeader(f.Name, f.Value)
if err != nil {
grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
return
return nil
}
d.mdata[k] = append(d.mdata[k], v)
d.mdata[f.Name] = append(d.mdata[f.Name], v)
}
}
return nil
}
type timeoutUnit uint8
@ -377,6 +476,9 @@ func newFramer(conn net.Conn) *framer {
writer: bufio.NewWriterSize(conn, http2IOBufSize),
}
f.fr = http2.NewFramer(f.writer, f.reader)
// Opt-in to Frame reuse API on framer to reduce garbage.
// Frames aren't safe to read from after a subsequent call to ReadFrame.
f.fr.SetReuseFrames()
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}

Some files were not shown because too many files have changed in this diff.