diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 2cbc07222c..d59d55ef7f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/docker/notary", - "GoVersion": "go1.4.2", + "GoVersion": "go1.5.1", "Packages": [ "./..." ], @@ -28,10 +28,6 @@ "ImportPath": "github.com/agl/ed25519", "Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c" }, - { - "ImportPath": "github.com/bradfitz/http2", - "Rev": "97124afb234048ae0c91b8883c59fcd890bf8145" - }, { "ImportPath": "github.com/bugsnag/bugsnag-go", "Comment": "v1.0.4-2-g13fd6b8", @@ -119,7 +115,7 @@ }, { "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "655cdfa588ea190e901bc5590e65d5621688847c" + "Rev": "3d2510a4dd961caffa2ae781669c628d82db700a" }, { "ImportPath": "github.com/google/gofuzz", @@ -216,23 +212,23 @@ }, { "ImportPath": "golang.org/x/net/context", - "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" + "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { - "ImportPath": "golang.org/x/oauth2", - "Rev": "ce5ea7da934b76b1066c527632359e2b8f65db97" + "ImportPath": "golang.org/x/net/http2", + "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { - "ImportPath": "google.golang.org/cloud/compute/metadata", - "Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1" + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { - "ImportPath": "google.golang.org/cloud/internal", - "Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1" + "ImportPath": "golang.org/x/net/trace", + "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "97f42dd262e97f4632986eddbc74c19fa022ea08" + "Rev": "3e7b7e58f491074e9577050058fb95d2351a60b0" }, { "ImportPath": "gopkg.in/yaml.v2", diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore b/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore deleted file mode 100644 index b25c15b81f..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*~ diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS b/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS deleted file mode 100644 index dab9dbe751..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS +++ /dev/null @@ -1,19 +0,0 @@ -# This file is like Go's AUTHORS file: it lists Copyright holders. -# The list of humans who have contributd is in the CONTRIBUTORS file. -# -# To contribute to this project, because it will eventually be folded -# back in to Go itself, you need to submit a CLA: -# -# http://golang.org/doc/contribute.html#copyright -# -# Then you get added to CONTRIBUTORS and you or your company get added -# to the AUTHORS file. - -Blake Mizerany github=bmizerany -Daniel Morsing github=DanielMorsing -Gabriel Aszalos github=gbbr -Google, Inc. -Keith Rarick github=kr -Matthew Keenan github=mattkeenan -Matt Layher github=mdlayher -Tatsuhiro Tsujikawa github=tatsuhiro-t diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS b/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS deleted file mode 100644 index 22e8c8c544..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS +++ /dev/null @@ -1,19 +0,0 @@ -# This file is like Go's CONTRIBUTORS file: it lists humans. -# The list of copyright holders (which may be companies) are in the AUTHORS file. 
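// The Godeps.json hunk above moves the vendored HTTP/2 package from
// github.com/bradfitz/http2 to its successor import path golang.org/x/net/http2,
// bumps Go to 1.5.1, and pulls in x/net/trace and x/net/internal/timeseries for the
// newer grpc revision. A minimal sketch of what the swap looks like for a consumer,
// assuming the package API is unchanged under the new path; the address and the
// cert/key paths are placeholders.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2" // was: "github.com/bradfitz/http2"
)

func main() {
	srv := &http.Server{Addr: ":4443", Handler: http.DefaultServeMux}
	// ConfigureServer enables HTTP/2 on an existing net/http server.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}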
-# -# To contribute to this project, because it will eventually be folded -# back in to Go itself, you need to submit a CLA: -# -# http://golang.org/doc/contribute.html#copyright -# -# Then you get added to CONTRIBUTORS and you or your company get added -# to the AUTHORS file. - -Blake Mizerany github=bmizerany -Brad Fitzpatrick github=bradfitz -Daniel Morsing github=DanielMorsing -Gabriel Aszalos github=gbbr -Keith Rarick github=kr -Matthew Keenan github=mattkeenan -Matt Layher github=mdlayher -Tatsuhiro Tsujikawa github=tatsuhiro-t diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING b/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING deleted file mode 100644 index 69aafe4d17..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING +++ /dev/null @@ -1,5 +0,0 @@ -We only accept contributions from users who have gone through Go's -contribution process (signed a CLA). - -Please acknowledge whether you have (and use the same email) if -sending a pull request. diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE b/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE deleted file mode 100644 index 2dc6853cae..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright 2014 Google & the Go AUTHORS - -Go AUTHORS are: -See https://code.google.com/p/go/source/browse/AUTHORS - -Licensed under the terms of Go itself: -https://code.google.com/p/go/source/browse/LICENSE diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go deleted file mode 100644 index 9eb13ff49c..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2014 The Go Authors. 
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "io" - "reflect" - "testing" -) - -var bufferReadTests = []struct { - buf buffer - read, wn int - werr error - wp []byte - wbuf buffer -}{ - { - buffer{[]byte{'a', 0}, 0, 1, false, nil}, - 5, 1, nil, []byte{'a'}, - buffer{[]byte{'a', 0}, 1, 1, false, nil}, - }, - { - buffer{[]byte{'a', 0}, 0, 1, true, io.EOF}, - 5, 1, io.EOF, []byte{'a'}, - buffer{[]byte{'a', 0}, 1, 1, true, io.EOF}, - }, - { - buffer{[]byte{0, 'a'}, 1, 2, false, nil}, - 5, 1, nil, []byte{'a'}, - buffer{[]byte{0, 'a'}, 2, 2, false, nil}, - }, - { - buffer{[]byte{0, 'a'}, 1, 2, true, io.EOF}, - 5, 1, io.EOF, []byte{'a'}, - buffer{[]byte{0, 'a'}, 2, 2, true, io.EOF}, - }, - { - buffer{[]byte{}, 0, 0, false, nil}, - 5, 0, errReadEmpty, []byte{}, - buffer{[]byte{}, 0, 0, false, nil}, - }, - { - buffer{[]byte{}, 0, 0, true, io.EOF}, - 5, 0, io.EOF, []byte{}, - buffer{[]byte{}, 0, 0, true, io.EOF}, - }, -} - -func TestBufferRead(t *testing.T) { - for i, tt := range bufferReadTests { - read := make([]byte, tt.read) - n, err := tt.buf.Read(read) - if n != tt.wn { - t.Errorf("#%d: wn = %d want %d", i, n, tt.wn) - continue - } - if err != tt.werr { - t.Errorf("#%d: werr = %v want %v", i, err, tt.werr) - continue - } - read = read[:n] - if !reflect.DeepEqual(read, tt.wp) { - t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp) - } - if !reflect.DeepEqual(tt.buf, tt.wbuf) { - t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf) - } - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go deleted file mode 100644 index 86d52ee658..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import "testing" - -func TestErrCodeString(t *testing.T) { - tests := []struct { - err ErrCode - want string - }{ - {ErrCodeProtocol, "PROTOCOL_ERROR"}, - {0xd, "HTTP_1_1_REQUIRED"}, - {0xf, "unknown error code 0xf"}, - } - for i, tt := range tests { - got := tt.err.String() - if got != tt.want { - t.Errorf("%d. Error = %q; want %q", i, got, tt.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go deleted file mode 100644 index dbb656f8e5..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2014 The Go Authors. 
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import "testing" - -func TestFlow(t *testing.T) { - var st flow - var conn flow - st.add(3) - conn.add(2) - - if got, want := st.available(), int32(3); got != want { - t.Errorf("available = %d; want %d", got, want) - } - st.setConnFlow(&conn) - if got, want := st.available(), int32(2); got != want { - t.Errorf("after parent setup, available = %d; want %d", got, want) - } - - st.take(2) - if got, want := conn.available(), int32(0); got != want { - t.Errorf("after taking 2, conn = %d; want %d", got, want) - } - if got, want := st.available(), int32(0); got != want { - t.Errorf("after taking 2, stream = %d; want %d", got, want) - } -} - -func TestFlowAdd(t *testing.T) { - var f flow - if !f.add(1) { - t.Fatal("failed to add 1") - } - if !f.add(-1) { - t.Fatal("failed to add -1") - } - if got, want := f.available(), int32(0); got != want { - t.Fatalf("size = %d; want %d", got, want) - } - if !f.add(1<<31 - 1) { - t.Fatal("failed to add 2^31-1") - } - if got, want := f.available(), int32(1<<31-1); got != want { - t.Fatalf("size = %d; want %d", got, want) - } - if f.add(1) { - t.Fatal("adding 1 to max shouldn't be allowed") - } - -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go deleted file mode 100644 index 37f1735a5c..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go +++ /dev/null @@ -1,578 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "reflect" - "strings" - "testing" - "unsafe" -) - -func testFramer() (*Framer, *bytes.Buffer) { - buf := new(bytes.Buffer) - return NewFramer(buf, buf), buf -} - -func TestFrameSizes(t *testing.T) { - // Catch people rearranging the FrameHeader fields. 
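// The frame_test.go removal in this hunk covered the exported Framer round-trip:
// write a frame into a buffer, read it back, and inspect the parsed result. A minimal
// sketch of the same round-trip against golang.org/x/net/http2, assuming the Framer
// API carries over unchanged from the old import path:
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // writer and reader share one buffer

	// Write a DATA frame carrying "ABC" with END_STREAM set on stream 1.
	if err := fr.WriteData(1, true, []byte("ABC")); err != nil {
		log.Fatal(err)
	}
	// Read it back and inspect the parsed frame.
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	df := f.(*http2.DataFrame)
	fmt.Printf("stream=%d endStream=%v data=%q\n",
		df.Header().StreamID, df.StreamEnded(), df.Data())
}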
- if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want { - t.Errorf("FrameHeader size = %d; want %d", got, want) - } -} - -func TestWriteRST(t *testing.T) { - fr, buf := testFramer() - var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 - var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4 - fr.WriteRSTStream(streamID, ErrCode(errCode)) - const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - want := &RSTStreamFrame{ - FrameHeader: FrameHeader{ - valid: true, - Type: 0x3, - Flags: 0x0, - Length: 0x4, - StreamID: 0x1020304, - }, - ErrCode: 0x7060504, - } - if !reflect.DeepEqual(f, want) { - t.Errorf("parsed back %#v; want %#v", f, want) - } -} - -func TestWriteData(t *testing.T) { - fr, buf := testFramer() - var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 - data := []byte("ABC") - fr.WriteData(streamID, true, data) - const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - df, ok := f.(*DataFrame) - if !ok { - t.Fatalf("got %T; want *DataFrame", f) - } - if !bytes.Equal(df.Data(), data) { - t.Errorf("got %q; want %q", df.Data(), data) - } - if f.Header().Flags&1 == 0 { - t.Errorf("didn't see END_STREAM flag") - } -} - -func TestWriteHeaders(t *testing.T) { - tests := []struct { - name string - p HeadersFrameParam - wantEnc string - wantFrame *HeadersFrame - }{ - { - "basic", - HeadersFrameParam{ - StreamID: 42, - BlockFragment: []byte("abc"), - Priority: PriorityParam{}, - }, - "\x00\x00\x03\x01\x00\x00\x00\x00*abc", - &HeadersFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: 42, - Type: FrameHeaders, - Length: uint32(len("abc")), - }, - Priority: PriorityParam{}, - headerFragBuf: []byte("abc"), - }, - }, - { - "basic + end flags", - HeadersFrameParam{ - StreamID: 42, - BlockFragment: []byte("abc"), - EndStream: true, - EndHeaders: true, - Priority: PriorityParam{}, - }, - "\x00\x00\x03\x01\x05\x00\x00\x00*abc", - &HeadersFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: 42, - Type: FrameHeaders, - Flags: FlagHeadersEndStream | FlagHeadersEndHeaders, - Length: uint32(len("abc")), - }, - Priority: PriorityParam{}, - headerFragBuf: []byte("abc"), - }, - }, - { - "with padding", - HeadersFrameParam{ - StreamID: 42, - BlockFragment: []byte("abc"), - EndStream: true, - EndHeaders: true, - PadLength: 5, - Priority: PriorityParam{}, - }, - "\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00", - &HeadersFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: 42, - Type: FrameHeaders, - Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded, - Length: uint32(1 + len("abc") + 5), // pad length + contents + padding - }, - Priority: PriorityParam{}, - headerFragBuf: []byte("abc"), - }, - }, - { - "with priority", - HeadersFrameParam{ - StreamID: 42, - BlockFragment: []byte("abc"), - EndStream: true, - EndHeaders: true, - PadLength: 2, - Priority: PriorityParam{ - StreamDep: 15, - Exclusive: true, - Weight: 127, - }, - }, - "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00", - &HeadersFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: 42, - Type: FrameHeaders, - Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority, - Length: uint32(1 + 5 + len("abc") 
+ 2), // pad length + priority + contents + padding - }, - Priority: PriorityParam{ - StreamDep: 15, - Exclusive: true, - Weight: 127, - }, - headerFragBuf: []byte("abc"), - }, - }, - } - for _, tt := range tests { - fr, buf := testFramer() - if err := fr.WriteHeaders(tt.p); err != nil { - t.Errorf("test %q: %v", tt.name, err) - continue - } - if buf.String() != tt.wantEnc { - t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) - continue - } - if !reflect.DeepEqual(f, tt.wantFrame) { - t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) - } - } -} - -func TestWriteContinuation(t *testing.T) { - const streamID = 42 - tests := []struct { - name string - end bool - frag []byte - - wantFrame *ContinuationFrame - }{ - { - "not end", - false, - []byte("abc"), - &ContinuationFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: streamID, - Type: FrameContinuation, - Length: uint32(len("abc")), - }, - headerFragBuf: []byte("abc"), - }, - }, - { - "end", - true, - []byte("def"), - &ContinuationFrame{ - FrameHeader: FrameHeader{ - valid: true, - StreamID: streamID, - Type: FrameContinuation, - Flags: FlagContinuationEndHeaders, - Length: uint32(len("def")), - }, - headerFragBuf: []byte("def"), - }, - }, - } - for _, tt := range tests { - fr, _ := testFramer() - if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil { - t.Errorf("test %q: %v", tt.name, err) - continue - } - f, err := fr.ReadFrame() - if err != nil { - t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) - continue - } - if !reflect.DeepEqual(f, tt.wantFrame) { - t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) - } - } -} - -func TestWritePriority(t *testing.T) { - const streamID = 42 - tests := []struct { - name string - priority PriorityParam - wantFrame *PriorityFrame - }{ - { - "not exclusive", - PriorityParam{ - StreamDep: 2, - Exclusive: false, - Weight: 127, - }, - &PriorityFrame{ - FrameHeader{ - valid: true, - StreamID: streamID, - Type: FramePriority, - Length: 5, - }, - PriorityParam{ - StreamDep: 2, - Exclusive: false, - Weight: 127, - }, - }, - }, - - { - "exclusive", - PriorityParam{ - StreamDep: 3, - Exclusive: true, - Weight: 77, - }, - &PriorityFrame{ - FrameHeader{ - valid: true, - StreamID: streamID, - Type: FramePriority, - Length: 5, - }, - PriorityParam{ - StreamDep: 3, - Exclusive: true, - Weight: 77, - }, - }, - }, - } - for _, tt := range tests { - fr, _ := testFramer() - if err := fr.WritePriority(streamID, tt.priority); err != nil { - t.Errorf("test %q: %v", tt.name, err) - continue - } - f, err := fr.ReadFrame() - if err != nil { - t.Errorf("test %q: failed to read the frame back: %v", tt.name, err) - continue - } - if !reflect.DeepEqual(f, tt.wantFrame) { - t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame) - } - } -} - -func TestWriteSettings(t *testing.T) { - fr, buf := testFramer() - settings := []Setting{{1, 2}, {3, 4}} - fr.WriteSettings(settings...) 
- const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - t.Fatalf("Got a %T; want a SettingsFrame", f) - } - var got []Setting - sf.ForeachSetting(func(s Setting) error { - got = append(got, s) - valBack, ok := sf.Value(s.ID) - if !ok || valBack != s.Val { - t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val) - } - return nil - }) - if !reflect.DeepEqual(settings, got) { - t.Errorf("Read settings %+v != written settings %+v", got, settings) - } -} - -func TestWriteSettingsAck(t *testing.T) { - fr, buf := testFramer() - fr.WriteSettingsAck() - const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } -} - -func TestWriteWindowUpdate(t *testing.T) { - fr, buf := testFramer() - const streamID = 1<<24 + 2<<16 + 3<<8 + 4 - const incr = 7<<24 + 6<<16 + 5<<8 + 4 - if err := fr.WriteWindowUpdate(streamID, incr); err != nil { - t.Fatal(err) - } - const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - want := &WindowUpdateFrame{ - FrameHeader: FrameHeader{ - valid: true, - Type: 0x8, - Flags: 0x0, - Length: 0x4, - StreamID: 0x1020304, - }, - Increment: 0x7060504, - } - if !reflect.DeepEqual(f, want) { - t.Errorf("parsed back %#v; want %#v", f, want) - } -} - -func TestWritePing(t *testing.T) { testWritePing(t, false) } -func TestWritePingAck(t *testing.T) { testWritePing(t, true) } - -func testWritePing(t *testing.T, ack bool) { - fr, buf := testFramer() - if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - var wantFlags Flags - if ack { - wantFlags = FlagPingAck - } - var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - want := &PingFrame{ - FrameHeader: FrameHeader{ - valid: true, - Type: 0x6, - Flags: wantFlags, - Length: 0x8, - StreamID: 0, - }, - Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8}, - } - if !reflect.DeepEqual(f, want) { - t.Errorf("parsed back %#v; want %#v", f, want) - } -} - -func TestReadFrameHeader(t *testing.T) { - tests := []struct { - in string - want FrameHeader - }{ - {in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}}, - {in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{ - Length: 66051, Type: 4, Flags: 5, StreamID: 101124105, - }}, - // Ignore high bit: - {in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{ - Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}}, - {in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{ - Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}}, - } - for i, tt := range tests { - got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in)) - if err != nil { - t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err) - continue - } - tt.want.valid = true - if got != tt.want { - t.Errorf("%d. 
readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want) - } - } -} - -func TestReadWriteFrameHeader(t *testing.T) { - tests := []struct { - len uint32 - typ FrameType - flags Flags - streamID uint32 - }{ - {len: 0, typ: 255, flags: 1, streamID: 0}, - {len: 0, typ: 255, flags: 1, streamID: 1}, - {len: 0, typ: 255, flags: 1, streamID: 255}, - {len: 0, typ: 255, flags: 1, streamID: 256}, - {len: 0, typ: 255, flags: 1, streamID: 65535}, - {len: 0, typ: 255, flags: 1, streamID: 65536}, - - {len: 0, typ: 1, flags: 255, streamID: 1}, - {len: 255, typ: 1, flags: 255, streamID: 1}, - {len: 256, typ: 1, flags: 255, streamID: 1}, - {len: 65535, typ: 1, flags: 255, streamID: 1}, - {len: 65536, typ: 1, flags: 255, streamID: 1}, - {len: 16777215, typ: 1, flags: 255, streamID: 1}, - } - for _, tt := range tests { - fr, buf := testFramer() - fr.startWrite(tt.typ, tt.flags, tt.streamID) - fr.writeBytes(make([]byte, tt.len)) - fr.endWrite() - fh, err := ReadFrameHeader(buf) - if err != nil { - t.Errorf("ReadFrameHeader(%+v) = %v", tt, err) - continue - } - if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID { - t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh) - } - } - -} - -func TestWriteTooLargeFrame(t *testing.T) { - fr, _ := testFramer() - fr.startWrite(0, 1, 1) - fr.writeBytes(make([]byte, 1<<24)) - err := fr.endWrite() - if err != ErrFrameTooLarge { - t.Errorf("endWrite = %v; want errFrameTooLarge", err) - } -} - -func TestWriteGoAway(t *testing.T) { - const debug = "foo" - fr, buf := testFramer() - if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil { - t.Fatal(err) - } - const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - want := &GoAwayFrame{ - FrameHeader: FrameHeader{ - valid: true, - Type: 0x7, - Flags: 0, - Length: uint32(4 + 4 + len(debug)), - StreamID: 0, - }, - LastStreamID: 0x01020304, - ErrCode: 0x05060708, - debugData: []byte(debug), - } - if !reflect.DeepEqual(f, want) { - t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want) - } - if got := string(f.(*GoAwayFrame).DebugData()); got != debug { - t.Errorf("debug data = %q; want %q", got, debug) - } -} - -func TestWritePushPromise(t *testing.T) { - pp := PushPromiseParam{ - StreamID: 42, - PromiseID: 42, - BlockFragment: []byte("abc"), - } - fr, buf := testFramer() - if err := fr.WritePushPromise(pp); err != nil { - t.Fatal(err) - } - const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc" - if buf.String() != wantEnc { - t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) - } - f, err := fr.ReadFrame() - if err != nil { - t.Fatal(err) - } - _, ok := f.(*PushPromiseFrame) - if !ok { - t.Fatalf("got %T; want *PushPromiseFrame", f) - } - want := &PushPromiseFrame{ - FrameHeader: FrameHeader{ - valid: true, - Type: 0x5, - Flags: 0x0, - Length: 0x7, - StreamID: 42, - }, - PromiseID: 42, - headerFragBuf: []byte("abc"), - } - if !reflect.DeepEqual(f, want) { - t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want) - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go deleted file mode 100644 index 3ff4957e97..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "fmt" - "strings" - "testing" -) - -func TestGoroutineLock(t *testing.T) { - DebugGoroutines = true - g := newGoroutineLock() - g.check() - - sawPanic := make(chan interface{}) - go func() { - defer func() { sawPanic <- recover() }() - g.check() // should panic - }() - e := <-sawPanic - if e == nil { - t.Fatal("did not see panic from check in other goroutine") - } - if !strings.Contains(fmt.Sprint(e), "wrong goroutine") { - t.Errorf("expected on see panic about running on the wrong goroutine; got %v", e) - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go deleted file mode 100644 index dce66c90f5..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package hpack - -import ( - "bytes" - "encoding/hex" - "reflect" - "strings" - "testing" -) - -func TestEncoderTableSizeUpdate(t *testing.T) { - tests := []struct { - size1, size2 uint32 - wantHex string - }{ - // Should emit 2 table size updates (2048 and 4096) - {2048, 4096, "3fe10f 3fe11f 82"}, - - // Should emit 1 table size update (2048) - {16384, 2048, "3fe10f 82"}, - } - for _, tt := range tests { - var buf bytes.Buffer - e := NewEncoder(&buf) - e.SetMaxDynamicTableSize(tt.size1) - e.SetMaxDynamicTableSize(tt.size2) - if err := e.WriteField(pair(":method", "GET")); err != nil { - t.Fatal(err) - } - want := removeSpace(tt.wantHex) - if got := hex.EncodeToString(buf.Bytes()); got != want { - t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) - } - } -} - -func TestEncoderWriteField(t *testing.T) { - var buf bytes.Buffer - e := NewEncoder(&buf) - var got []HeaderField - d := NewDecoder(4<<10, func(f HeaderField) { - got = append(got, f) - }) - - tests := []struct { - hdrs []HeaderField - }{ - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }}, - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }}, - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }}, - } - for i, tt := range tests { - buf.Reset() - got = got[:0] - for _, hf := range tt.hdrs { - if err := e.WriteField(hf); err != nil { - t.Fatal(err) - } - } - _, err := d.Write(buf.Bytes()) - if err != nil { - t.Errorf("%d. Decoder Write = %v", i, err) - } - if !reflect.DeepEqual(got, tt.hdrs) { - t.Errorf("%d. 
Decoded %+v; want %+v", i, got, tt.hdrs) - } - } -} - -func TestEncoderSearchTable(t *testing.T) { - e := NewEncoder(nil) - - e.dynTab.add(pair("foo", "bar")) - e.dynTab.add(pair("blake", "miz")) - e.dynTab.add(pair(":method", "GET")) - - tests := []struct { - hf HeaderField - wantI uint64 - wantMatch bool - }{ - // Name and Value match - {pair("foo", "bar"), uint64(len(staticTable) + 3), true}, - {pair("blake", "miz"), uint64(len(staticTable) + 2), true}, - {pair(":method", "GET"), 2, true}, - - // Only name match because Sensitive == true - {HeaderField{":method", "GET", true}, 2, false}, - - // Only Name matches - {pair("foo", "..."), uint64(len(staticTable) + 3), false}, - {pair("blake", "..."), uint64(len(staticTable) + 2), false}, - {pair(":method", "..."), 2, false}, - - // None match - {pair("foo-", "bar"), 0, false}, - } - for _, tt := range tests { - if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { - t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) - } - } -} - -func TestAppendVarInt(t *testing.T) { - tests := []struct { - n byte - i uint64 - want []byte - }{ - // Fits in a byte: - {1, 0, []byte{0}}, - {2, 2, []byte{2}}, - {3, 6, []byte{6}}, - {4, 14, []byte{14}}, - {5, 30, []byte{30}}, - {6, 62, []byte{62}}, - {7, 126, []byte{126}}, - {8, 254, []byte{254}}, - - // Multiple bytes: - {5, 1337, []byte{31, 154, 10}}, - } - for _, tt := range tests { - got := appendVarInt(nil, tt.n, tt.i) - if !bytes.Equal(got, tt.want) { - t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) - } - } -} - -func TestAppendHpackString(t *testing.T) { - tests := []struct { - s, wantHex string - }{ - // Huffman encoded - {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, - - // Not Huffman encoded - {"a", "01 61"}, - - // zero length - {"", "00"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendHpackString(nil, tt.s) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) - } - } -} - -func TestAppendIndexed(t *testing.T) { - tests := []struct { - i uint64 - wantHex string - }{ - // 1 byte - {1, "81"}, - {126, "fe"}, - - // 2 bytes - {127, "ff00"}, - {128, "ff01"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendIndexed(nil, tt.i) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) - } - } -} - -func TestAppendNewName(t *testing.T) { - tests := []struct { - f HeaderField - indexing bool - wantHex string - }{ - // Incremental indexing - {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - - // Without indexing - {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - - // Never indexed - {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendNewName(nil, tt.f, tt.indexing) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) - } - } -} - -func TestAppendIndexedName(t *testing.T) { - tests := []struct { - f HeaderField - i uint64 - indexing 
bool - wantHex string - }{ - // Incremental indexing - {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, - - // Without indexing - {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, - - // Never indexed - {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, - {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) - } - } -} - -func TestAppendTableSize(t *testing.T) { - tests := []struct { - i uint32 - wantHex string - }{ - // Fits into 1 byte - {30, "3e"}, - - // Extra byte - {31, "3f00"}, - {32, "3f01"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendTableSize(nil, tt.i) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) - } - } -} - -func TestEncoderSetMaxDynamicTableSize(t *testing.T) { - var buf bytes.Buffer - e := NewEncoder(&buf) - tests := []struct { - v uint32 - wantUpdate bool - wantMinSize uint32 - wantMaxSize uint32 - }{ - // Set new table size to 2048 - {2048, true, 2048, 2048}, - - // Set new table size to 16384, but still limited to - // 4096 - {16384, true, 2048, 4096}, - } - for _, tt := range tests { - e.SetMaxDynamicTableSize(tt.v) - if got := e.tableSizeUpdate; tt.wantUpdate != got { - t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) - } - if got := e.minSize; tt.wantMinSize != got { - t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) - } - if got := e.dynTab.maxSize; tt.wantMaxSize != got { - t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) - } - } -} - -func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { - e := NewEncoder(nil) - // 4095 < initialHeaderTableSize means maxSize is truncated to - // 4095. - e.SetMaxDynamicTableSizeLimit(4095) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - if got, want := e.maxSizeLimit, uint32(4095); got != want { - t.Errorf("e.maxSizeLimit = %v; want %v", got, want) - } - if got, want := e.tableSizeUpdate, true; got != want { - t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) - } - // maxSize will be truncated to maxSizeLimit - e.SetMaxDynamicTableSize(16384) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - // 8192 > current maxSizeLimit, so maxSize does not change. - e.SetMaxDynamicTableSizeLimit(8192) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - if got, want := e.maxSizeLimit, uint32(8192); got != want { - t.Errorf("e.maxSizeLimit = %v; want %v", got, want) - } -} - -func removeSpace(s string) string { - return strings.Replace(s, " ", "", -1) -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go deleted file mode 100644 index 08bee414f3..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2014 The Go Authors. 
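// The hpack encode_test.go removed above exercised the exported HPACK encoder
// (WriteField, sensitive/never-indexed fields, table-size updates). A minimal
// encode/decode sketch against golang.org/x/net/http2/hpack, assuming the hpack API
// is unchanged under the new import path:
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "authorization", Value: "secret", Sensitive: true}, // never indexed
	} {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}
	// Decode the block back; 4096 is the default dynamic table size.
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s (sensitive=%v)\n", f.Name, f.Value, f.Sensitive)
	}
}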
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package hpack - -import ( - "bufio" - "bytes" - "encoding/hex" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "testing" -) - -func TestStaticTable(t *testing.T) { - fromSpec := ` - +-------+-----------------------------+---------------+ - | 1 | :authority | | - | 2 | :method | GET | - | 3 | :method | POST | - | 4 | :path | / | - | 5 | :path | /index.html | - | 6 | :scheme | http | - | 7 | :scheme | https | - | 8 | :status | 200 | - | 9 | :status | 204 | - | 10 | :status | 206 | - | 11 | :status | 304 | - | 12 | :status | 400 | - | 13 | :status | 404 | - | 14 | :status | 500 | - | 15 | accept-charset | | - | 16 | accept-encoding | gzip, deflate | - | 17 | accept-language | | - | 18 | accept-ranges | | - | 19 | accept | | - | 20 | access-control-allow-origin | | - | 21 | age | | - | 22 | allow | | - | 23 | authorization | | - | 24 | cache-control | | - | 25 | content-disposition | | - | 26 | content-encoding | | - | 27 | content-language | | - | 28 | content-length | | - | 29 | content-location | | - | 30 | content-range | | - | 31 | content-type | | - | 32 | cookie | | - | 33 | date | | - | 34 | etag | | - | 35 | expect | | - | 36 | expires | | - | 37 | from | | - | 38 | host | | - | 39 | if-match | | - | 40 | if-modified-since | | - | 41 | if-none-match | | - | 42 | if-range | | - | 43 | if-unmodified-since | | - | 44 | last-modified | | - | 45 | link | | - | 46 | location | | - | 47 | max-forwards | | - | 48 | proxy-authenticate | | - | 49 | proxy-authorization | | - | 50 | range | | - | 51 | referer | | - | 52 | refresh | | - | 53 | retry-after | | - | 54 | server | | - | 55 | set-cookie | | - | 56 | strict-transport-security | | - | 57 | transfer-encoding | | - | 58 | user-agent | | - | 59 | vary | | - | 60 | via | | - | 61 | www-authenticate | | - +-------+-----------------------------+---------------+ -` - bs := bufio.NewScanner(strings.NewReader(fromSpec)) - re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) - for bs.Scan() { - l := bs.Text() - if !strings.Contains(l, "|") { - continue - } - m := re.FindStringSubmatch(l) - if m == nil { - continue - } - i, err := strconv.Atoi(m[1]) - if err != nil { - t.Errorf("Bogus integer on line %q", l) - continue - } - if i < 1 || i > len(staticTable) { - t.Errorf("Bogus index %d on line %q", i, l) - continue - } - if got, want := staticTable[i-1].Name, m[2]; got != want { - t.Errorf("header index %d name = %q; want %q", i, got, want) - } - if got, want := staticTable[i-1].Value, m[3]; got != want { - t.Errorf("header index %d value = %q; want %q", i, got, want) - } - } - if err := bs.Err(); err != nil { - t.Error(err) - } -} - -func (d *Decoder) mustAt(idx int) HeaderField { - if hf, ok := d.at(uint64(idx)); !ok { - panic(fmt.Sprintf("bogus index %d", idx)) - } else { - return hf - } -} - -func TestDynamicTableAt(t *testing.T) { - d := NewDecoder(4096, nil) - at := d.mustAt - if got, want := at(2), (pair(":method", "GET")); got != want { - t.Errorf("at(2) = %v; want %v", got, want) - } - d.dynTab.add(pair("foo", "bar")) - d.dynTab.add(pair("blake", "miz")) - if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want { - t.Errorf("at(dyn 1) = %v; want %v", got, want) - } - if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want { - t.Errorf("at(dyn 2) = %v; want %v", got, want) - } - if got, want := at(3), 
(pair(":method", "POST")); got != want { - t.Errorf("at(3) = %v; want %v", got, want) - } -} - -func TestDynamicTableSearch(t *testing.T) { - dt := dynamicTable{} - dt.setMaxSize(4096) - - dt.add(pair("foo", "bar")) - dt.add(pair("blake", "miz")) - dt.add(pair(":method", "GET")) - - tests := []struct { - hf HeaderField - wantI uint64 - wantMatch bool - }{ - // Name and Value match - {pair("foo", "bar"), 3, true}, - {pair(":method", "GET"), 1, true}, - - // Only name match because of Sensitive == true - {HeaderField{"blake", "miz", true}, 2, false}, - - // Only Name matches - {pair("foo", "..."), 3, false}, - {pair("blake", "..."), 2, false}, - {pair(":method", "..."), 1, false}, - - // None match - {pair("foo-", "bar"), 0, false}, - } - for _, tt := range tests { - if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { - t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) - } - } -} - -func TestDynamicTableSizeEvict(t *testing.T) { - d := NewDecoder(4096, nil) - if want := uint32(0); d.dynTab.size != want { - t.Fatalf("size = %d; want %d", d.dynTab.size, want) - } - add := d.dynTab.add - add(pair("blake", "eats pizza")) - if want := uint32(15 + 32); d.dynTab.size != want { - t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) - } - add(pair("foo", "bar")) - if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { - t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) - } - d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) - if want := uint32(6 + 32); d.dynTab.size != want { - t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) - } - if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want { - t.Errorf("at(dyn 1) = %v; want %v", got, want) - } - add(pair("long", strings.Repeat("x", 500))) - if want := uint32(0); d.dynTab.size != want { - t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) - } -} - -func TestDecoderDecode(t *testing.T) { - tests := []struct { - name string - in []byte - want []HeaderField - wantDynTab []HeaderField // newest entry first - }{ - // C.2.1 Literal Header Field with Indexing - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 - {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), - []HeaderField{pair("custom-key", "custom-header")}, - []HeaderField{pair("custom-key", "custom-header")}, - }, - - // C.2.2 Literal Header Field without Indexing - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 - {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), - []HeaderField{pair(":path", "/sample/path")}, - []HeaderField{}}, - - // C.2.3 Literal Header Field never Indexed - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 - {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), - []HeaderField{{"password", "secret", true}}, - []HeaderField{}}, - - // C.2.4 Indexed Header Field - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 - {"C.2.4", []byte("\x82"), - []HeaderField{pair(":method", "GET")}, - []HeaderField{}}, - } - for _, tt := range tests { - d := NewDecoder(4096, nil) - hf, err := d.DecodeFull(tt.in) - if err != nil { - t.Errorf("%s: %v", tt.name, err) - continue - } - if !reflect.DeepEqual(hf, tt.want) { - t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) - } - gotDynTab := d.dynTab.reverseCopy() - if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { - t.Errorf("%s: dynamic table after 
= %v; want %v", tt.name, gotDynTab, tt.wantDynTab) - } - } -} - -func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { - hf = make([]HeaderField, len(dt.ents)) - for i := range hf { - hf[i] = dt.ents[len(dt.ents)-1-i] - } - return -} - -type encAndWant struct { - enc []byte - want []HeaderField - wantDynTab []HeaderField - wantDynSize uint32 -} - -// C.3 Request Examples without Huffman Coding -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 -func TestDecodeC3_NoHuffman(t *testing.T) { - testDecodeSeries(t, 4096, []encAndWant{ - {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }, - []HeaderField{ - pair(":authority", "www.example.com"), - }, - 57, - }, - {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }, - []HeaderField{ - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 110, - }, - {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }, - []HeaderField{ - pair("custom-key", "custom-value"), - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 164, - }, - }) -} - -// C.4 Request Examples with Huffman Coding -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 -func TestDecodeC4_Huffman(t *testing.T) { - testDecodeSeries(t, 4096, []encAndWant{ - {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }, - []HeaderField{ - pair(":authority", "www.example.com"), - }, - 57, - }, - {dehex("8286 84be 5886 a8eb 1064 9cbf"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }, - []HeaderField{ - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 110, - }, - {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }, - []HeaderField{ - pair("custom-key", "custom-value"), - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 164, - }, - }) -} - -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 -// "This section shows several consecutive header lists, corresponding -// to HTTP responses, on the same connection. The HTTP/2 setting -// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 -// octets, causing some evictions to occur." 
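// The quoted C.5 note above describes responses decoded with
// SETTINGS_HEADER_TABLE_SIZE = 256, which forces dynamic-table evictions. Each table
// entry costs len(name)+len(value)+32 octets (RFC 7541, section 4.1). A small sketch
// of that accounting for the first response in the example, using a local helper and
// only the HeaderField type from golang.org/x/net/http2/hpack:
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// entrySize is the dynamic-table cost of one entry: name + value + 32 octets of overhead.
func entrySize(f hpack.HeaderField) uint32 {
	return uint32(len(f.Name)+len(f.Value)) + 32
}

func main() {
	fields := []hpack.HeaderField{
		{Name: ":status", Value: "302"},
		{Name: "cache-control", Value: "private"},
		{Name: "date", Value: "Mon, 21 Oct 2013 20:13:21 GMT"},
		{Name: "location", Value: "https://www.example.com"},
	}
	var total uint32
	for _, f := range fields {
		total += entrySize(f)
	}
	fmt.Println(total) // 222, matching wantDynSize in the C.5 test case below
}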
-func TestDecodeC5_ResponsesNoHuff(t *testing.T) { - testDecodeSeries(t, 256, []encAndWant{ - {dehex(` -4803 3330 3258 0770 7269 7661 7465 611d -4d6f 6e2c 2032 3120 4f63 7420 3230 3133 -2032 303a 3133 3a32 3120 474d 546e 1768 -7474 7073 3a2f 2f77 7777 2e65 7861 6d70 -6c65 2e63 6f6d -`), - []HeaderField{ - pair(":status", "302"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - pair(":status", "302"), - }, - 222, - }, - {dehex("4803 3330 37c1 c0bf"), - []HeaderField{ - pair(":status", "307"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair(":status", "307"), - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - }, - 222, - }, - {dehex(` -88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 -3230 3133 2032 303a 3133 3a32 3220 474d -54c0 5a04 677a 6970 7738 666f 6f3d 4153 -444a 4b48 514b 425a 584f 5157 454f 5049 -5541 5851 5745 4f49 553b 206d 6178 2d61 -6765 3d33 3630 303b 2076 6572 7369 6f6e -3d31 -`), - []HeaderField{ - pair(":status", "200"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - pair("location", "https://www.example.com"), - pair("content-encoding", "gzip"), - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - }, - []HeaderField{ - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - pair("content-encoding", "gzip"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - }, - 215, - }, - }) -} - -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 -// "This section shows the same examples as the previous section, but -// using Huffman encoding for the literal values. The HTTP/2 setting -// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 -// octets, causing some evictions to occur. The eviction mechanism -// uses the length of the decoded literal values, so the same -// evictions occurs as in the previous section." 
-func TestDecodeC6_ResponsesHuffman(t *testing.T) { - testDecodeSeries(t, 256, []encAndWant{ - {dehex(` -4882 6402 5885 aec3 771a 4b61 96d0 7abe -9410 54d4 44a8 2005 9504 0b81 66e0 82a6 -2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 -e9ae 82ae 43d3 -`), - []HeaderField{ - pair(":status", "302"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - pair(":status", "302"), - }, - 222, - }, - {dehex("4883 640e ffc1 c0bf"), - []HeaderField{ - pair(":status", "307"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair(":status", "307"), - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - }, - 222, - }, - {dehex(` -88c1 6196 d07a be94 1054 d444 a820 0595 -040b 8166 e084 a62d 1bff c05a 839b d9ab -77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b -3960 d5af 2708 7f36 72c1 ab27 0fb5 291f -9587 3160 65c0 03ed 4ee5 b106 3d50 07 -`), - []HeaderField{ - pair(":status", "200"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - pair("location", "https://www.example.com"), - pair("content-encoding", "gzip"), - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - }, - []HeaderField{ - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - pair("content-encoding", "gzip"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - }, - 215, - }, - }) -} - -func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { - d := NewDecoder(size, nil) - for i, step := range steps { - hf, err := d.DecodeFull(step.enc) - if err != nil { - t.Fatalf("Error at step index %d: %v", i, err) - } - if !reflect.DeepEqual(hf, step.want) { - t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) - } - gotDynTab := d.dynTab.reverseCopy() - if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { - t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) - } - if d.dynTab.size != step.wantDynSize { - t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) - } - } -} - -func TestHuffmanDecode(t *testing.T) { - tests := []struct { - inHex, want string - }{ - {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, - {"a8eb 1064 9cbf", "no-cache"}, - {"25a8 49e9 5ba9 7d7f", "custom-key"}, - {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, - {"6402", "302"}, - {"aec3 771a 4b", "private"}, - {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, - {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, - {"9bd9 ab", "gzip"}, - {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", - "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, - } - for i, tt := range tests { - var buf bytes.Buffer - in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) - if err != nil { - t.Errorf("%d. hex input error: %v", i, err) - continue - } - if _, err := HuffmanDecode(&buf, in); err != nil { - t.Errorf("%d. decode error: %v", i, err) - continue - } - if got := buf.String(); tt.want != got { - t.Errorf("%d. 
decode = %q; want %q", i, got, tt.want) - } - } -} - -func TestAppendHuffmanString(t *testing.T) { - tests := []struct { - in, want string - }{ - {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, - {"no-cache", "a8eb 1064 9cbf"}, - {"custom-key", "25a8 49e9 5ba9 7d7f"}, - {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, - {"302", "6402"}, - {"private", "aec3 771a 4b"}, - {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, - {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, - {"gzip", "9bd9 ab"}, - {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", - "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, - } - for i, tt := range tests { - buf := []byte{} - want := strings.Replace(tt.want, " ", "", -1) - buf = AppendHuffmanString(buf, tt.in) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("%d. encode = %q; want %q", i, got, want) - } - } -} - -func TestReadVarInt(t *testing.T) { - type res struct { - i uint64 - consumed int - err error - } - tests := []struct { - n byte - p []byte - want res - }{ - // Fits in a byte: - {1, []byte{0}, res{0, 1, nil}}, - {2, []byte{2}, res{2, 1, nil}}, - {3, []byte{6}, res{6, 1, nil}}, - {4, []byte{14}, res{14, 1, nil}}, - {5, []byte{30}, res{30, 1, nil}}, - {6, []byte{62}, res{62, 1, nil}}, - {7, []byte{126}, res{126, 1, nil}}, - {8, []byte{254}, res{254, 1, nil}}, - - // Doesn't fit in a byte: - {1, []byte{1}, res{0, 0, errNeedMore}}, - {2, []byte{3}, res{0, 0, errNeedMore}}, - {3, []byte{7}, res{0, 0, errNeedMore}}, - {4, []byte{15}, res{0, 0, errNeedMore}}, - {5, []byte{31}, res{0, 0, errNeedMore}}, - {6, []byte{63}, res{0, 0, errNeedMore}}, - {7, []byte{127}, res{0, 0, errNeedMore}}, - {8, []byte{255}, res{0, 0, errNeedMore}}, - - // Ignoring top bits: - {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 - {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 - {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 - - // Extra byte: - {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte - - // Short a byte: - {5, []byte{191, 154}, res{0, 0, errNeedMore}}, - - // integer overflow: - {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, - } - for _, tt := range tests { - i, remain, err := readVarInt(tt.n, tt.p) - consumed := len(tt.p) - len(remain) - got := res{i, consumed, err} - if got != tt.want { - t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) - } - } -} - -func dehex(s string) []byte { - s = strings.Replace(s, " ", "", -1) - s = strings.Replace(s, "\n", "", -1) - b, err := hex.DecodeString(s) - if err != nil { - panic(err) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go deleted file mode 100644 index 6a1b7e8361..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
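// The Huffman cases in the removed hpack_test.go round-trip strings through the static
// HPACK Huffman table. The same round-trip against golang.org/x/net/http2/hpack,
// assuming AppendHuffmanString and HuffmanDecode keep their old signatures:
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	fmt.Printf("%x\n", enc) // f1e3c2e5f23a6ba0ab90f4ff, as in the test vectors above

	var buf bytes.Buffer
	if _, err := hpack.HuffmanDecode(&buf, enc); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String()) // www.example.com
}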
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "bytes" - "errors" - "flag" - "fmt" - "net/http" - "os/exec" - "strconv" - "strings" - "testing" - - "github.com/bradfitz/http2/hpack" -) - -var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") - -func condSkipFailingTest(t *testing.T) { - if !*knownFailing { - t.Skip("Skipping known-failing test without --known_failing") - } -} - -func init() { - DebugGoroutines = true - flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging") -} - -func TestSettingString(t *testing.T) { - tests := []struct { - s Setting - want string - }{ - {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, - {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, - } - for i, tt := range tests { - got := fmt.Sprint(tt.s) - if got != tt.want { - t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) - } - } -} - -type twriter struct { - t testing.TB - st *serverTester // optional -} - -func (w twriter) Write(p []byte) (n int, err error) { - if w.st != nil { - ps := string(p) - for _, phrase := range w.st.logFilter { - if strings.Contains(ps, phrase) { - return len(p), nil // no logging - } - } - } - w.t.Logf("%s", p) - return len(p), nil -} - -// like encodeHeader, but don't add implicit psuedo headers. -func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - for len(headers) > 0 { - k, v := headers[0], headers[1] - headers = headers[2:] - if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { - t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } - } - return buf.Bytes() -} - -// Verify that curl has http2. 
-func requireCurl(t *testing.T) { - out, err := dockerLogs(curl(t, "--version")) - if err != nil { - t.Skipf("failed to determine curl features; skipping test") - } - if !strings.Contains(string(out), "HTTP2") { - t.Skip("curl doesn't support HTTP2; skipping test") - } -} - -func curl(t *testing.T, args ...string) (container string) { - out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).CombinedOutput() - if err != nil { - t.Skipf("Failed to run curl in docker: %v, %s", err, out) - } - return strings.TrimSpace(string(out)) -} - -type puppetCommand struct { - fn func(w http.ResponseWriter, r *http.Request) - done chan<- bool -} - -type handlerPuppet struct { - ch chan puppetCommand -} - -func newHandlerPuppet() *handlerPuppet { - return &handlerPuppet{ - ch: make(chan puppetCommand), - } -} - -func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { - for cmd := range p.ch { - cmd.fn(w, r) - cmd.done <- true - } -} - -func (p *handlerPuppet) done() { close(p.ch) } -func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { - done := make(chan bool) - p.ch <- puppetCommand{fn, done} - <-done -} -func dockerLogs(container string) ([]byte, error) { - out, err := exec.Command("docker", "wait", container).CombinedOutput() - if err != nil { - return out, err - } - exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) - if err != nil { - return out, errors.New("unexpected exit status from docker wait") - } - out, err = exec.Command("docker", "logs", container).CombinedOutput() - exec.Command("docker", "rm", container).Run() - if err == nil && exitStatus != 0 { - err = fmt.Errorf("exit status %d: %s", exitStatus, out) - } - return out, err -} - -func kill(container string) { - exec.Command("docker", "kill", container).Run() - exec.Command("docker", "rm", container).Run() -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go deleted file mode 100644 index 10c4c327c4..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "errors" - "testing" -) - -func TestPipeClose(t *testing.T) { - var p pipe - p.c.L = &p.m - a := errors.New("a") - b := errors.New("b") - p.Close(a) - p.Close(b) - _, err := p.Read(make([]byte, 1)) - if err != a { - t.Errorf("err = %v want %v", err, a) - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go deleted file mode 100644 index d9648fd328..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "testing" -) - -func TestPriority(t *testing.T) { - // A -> B - // move A's parent to B - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: a, - weight: 16, - } - streams[2] = b - adjustStreamPriority(streams, 1, PriorityParam{ - Weight: 20, - StreamDep: 2, - }) - if a.parent != b { - t.Errorf("Expected A's parent to be B") - } - if a.weight != 20 { - t.Errorf("Expected A's weight to be 20; got %d", a.weight) - } - if b.parent != nil { - t.Errorf("Expected B to have no parent") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } -} - -func TestPriorityExclusiveZero(t *testing.T) { - // A B and C are all children of the 0 stream. - // Exclusive reprioritization to any of the streams - // should bring the rest of the streams under the - // reprioritized stream - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: nil, - weight: 16, - } - streams[2] = b - c := &stream{ - parent: nil, - weight: 16, - } - streams[3] = c - adjustStreamPriority(streams, 3, PriorityParam{ - Weight: 20, - StreamDep: 0, - Exclusive: true, - }) - if a.parent != c { - t.Errorf("Expected A's parent to be C") - } - if a.weight != 16 { - t.Errorf("Expected A's weight to be 16; got %d", a.weight) - } - if b.parent != c { - t.Errorf("Expected B's parent to be C") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } - if c.parent != nil { - t.Errorf("Expected C to have no parent") - } - if c.weight != 20 { - t.Errorf("Expected C's weight to be 20; got %d", b.weight) - } -} - -func TestPriorityOwnParent(t *testing.T) { - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: a, - weight: 16, - } - streams[2] = b - adjustStreamPriority(streams, 1, PriorityParam{ - Weight: 20, - StreamDep: 1, - }) - if a.parent != nil { - t.Errorf("Expected A's parent to be nil") - } - if a.weight != 20 { - t.Errorf("Expected A's weight to be 20; got %d", a.weight) - } - if b.parent != a { - t.Errorf("Expected B's parent to be A") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } - -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go deleted file mode 100644 index 1133223b85..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go +++ /dev/null @@ -1,2252 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "bytes" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httptest" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/bradfitz/http2/hpack" -) - -var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") - -type serverTester struct { - cc net.Conn // client conn - t testing.TB - ts *httptest.Server - fr *Framer - logBuf *bytes.Buffer - logFilter []string // substrings to filter out - scMu sync.Mutex // guards sc - sc *serverConn - - // writing headers: - headerBuf bytes.Buffer - hpackEnc *hpack.Encoder - - // reading frames: - frc chan Frame - frErrc chan error - readTimer *time.Timer -} - -func init() { - testHookOnPanicMu = new(sync.Mutex) -} - -func resetHooks() { - testHookOnPanicMu.Lock() - testHookOnPanic = nil - testHookOnPanicMu.Unlock() -} - -type serverTesterOpt string - -var optOnlyServer = serverTesterOpt("only_server") - -func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { - resetHooks() - - logBuf := new(bytes.Buffer) - ts := httptest.NewUnstartedServer(handler) - - tlsConfig := &tls.Config{ - InsecureSkipVerify: true, - // The h2-14 is temporary, until curl is updated. (as used by unit tests - // in Docker) - NextProtos: []string{NextProtoTLS, "h2-14"}, - } - - onlyServer := false - for _, opt := range opts { - switch v := opt.(type) { - case func(*tls.Config): - v(tlsConfig) - case func(*httptest.Server): - v(ts) - case serverTesterOpt: - onlyServer = (v == optOnlyServer) - default: - t.Fatalf("unknown newServerTester option type %T", v) - } - } - - ConfigureServer(ts.Config, &Server{}) - - st := &serverTester{ - t: t, - ts: ts, - logBuf: logBuf, - frc: make(chan Frame, 1), - frErrc: make(chan error, 1), - } - st.hpackEnc = hpack.NewEncoder(&st.headerBuf) - - var stderrv io.Writer = ioutil.Discard - if *stderrVerbose { - stderrv = os.Stderr - } - - ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config - ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv, twriter{t: t, st: st}, logBuf), "", log.LstdFlags) - ts.StartTLS() - - if VerboseLogs { - t.Logf("Running test server at: %s", ts.URL) - } - testHookGetServerConn = func(v *serverConn) { - st.scMu.Lock() - defer st.scMu.Unlock() - st.sc = v - st.sc.testHookCh = make(chan func()) - } - log.SetOutput(io.MultiWriter(stderrv, twriter{t: t, st: st})) - if !onlyServer { - cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) - if err != nil { - t.Fatal(err) - } - st.cc = cc - st.fr = NewFramer(cc, cc) - } - - return st -} - -func (st *serverTester) closeConn() { - st.scMu.Lock() - defer st.scMu.Unlock() - st.sc.conn.Close() -} - -func (st *serverTester) addLogFilter(phrase string) { - st.logFilter = append(st.logFilter, phrase) -} - -func (st *serverTester) stream(id uint32) *stream { - ch := make(chan *stream, 1) - st.sc.testHookCh <- func() { - ch <- st.sc.streams[id] - } - return <-ch -} - -func (st *serverTester) streamState(id uint32) streamState { - ch := make(chan streamState, 1) - st.sc.testHookCh <- func() { - state, _ := st.sc.state(id) - ch <- state - } - return <-ch -} - -func (st *serverTester) Close() { - st.ts.Close() - if st.cc != nil { - st.cc.Close() - } - 
log.SetOutput(os.Stderr) -} - -// greet initiates the client's HTTP/2 connection into a state where -// frames may be sent. -func (st *serverTester) greet() { - st.writePreface() - st.writeInitialSettings() - st.wantSettings() - st.writeSettingsAck() - st.wantSettingsAck() -} - -func (st *serverTester) writePreface() { - n, err := st.cc.Write(clientPreface) - if err != nil { - st.t.Fatalf("Error writing client preface: %v", err) - } - if n != len(clientPreface) { - st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) - } -} - -func (st *serverTester) writeInitialSettings() { - if err := st.fr.WriteSettings(); err != nil { - st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) - } -} - -func (st *serverTester) writeSettingsAck() { - if err := st.fr.WriteSettingsAck(); err != nil { - st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) - } -} - -func (st *serverTester) writeHeaders(p HeadersFrameParam) { - if err := st.fr.WriteHeaders(p); err != nil { - st.t.Fatalf("Error writing HEADERS: %v", err) - } -} - -func (st *serverTester) encodeHeaderField(k, v string) { - err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) - if err != nil { - st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } -} - -// encodeHeader encodes headers and returns their HPACK bytes. headers -// must contain an even number of key/value pairs. There may be -// multiple pairs for keys (e.g. "cookie"). The :method, :path, and -// :scheme headers default to GET, / and https. -func (st *serverTester) encodeHeader(headers ...string) []byte { - if len(headers)%2 == 1 { - panic("odd number of kv args") - } - - st.headerBuf.Reset() - - if len(headers) == 0 { - // Fast path, mostly for benchmarks, so test code doesn't pollute - // profiles when we're looking to improve server allocations. - st.encodeHeaderField(":method", "GET") - st.encodeHeaderField(":path", "/") - st.encodeHeaderField(":scheme", "https") - return st.headerBuf.Bytes() - } - - if len(headers) == 2 && headers[0] == ":method" { - // Another fast path for benchmarks. - st.encodeHeaderField(":method", headers[1]) - st.encodeHeaderField(":path", "/") - st.encodeHeaderField(":scheme", "https") - return st.headerBuf.Bytes() - } - - pseudoCount := map[string]int{} - keys := []string{":method", ":path", ":scheme"} - vals := map[string][]string{ - ":method": {"GET"}, - ":path": {"/"}, - ":scheme": {"https"}, - } - for len(headers) > 0 { - k, v := headers[0], headers[1] - headers = headers[2:] - if _, ok := vals[k]; !ok { - keys = append(keys, k) - } - if strings.HasPrefix(k, ":") { - pseudoCount[k]++ - if pseudoCount[k] == 1 { - vals[k] = []string{v} - } else { - // Allows testing of invalid headers w/ dup pseudo fields. - vals[k] = append(vals[k], v) - } - } else { - vals[k] = append(vals[k], v) - } - } - st.headerBuf.Reset() - for _, k := range keys { - for _, v := range vals[k] { - st.encodeHeaderField(k, v) - } - } - return st.headerBuf.Bytes() -} - -// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. 
-func (st *serverTester) bodylessReq1(headers ...string) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(headers...), - EndStream: true, - EndHeaders: true, - }) -} - -func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { - if err := st.fr.WriteData(streamID, endStream, data); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -func (st *serverTester) readFrame() (Frame, error) { - go func() { - fr, err := st.fr.ReadFrame() - if err != nil { - st.frErrc <- err - } else { - st.frc <- fr - } - }() - t := st.readTimer - if t == nil { - t = time.NewTimer(2 * time.Second) - st.readTimer = t - } - t.Reset(2 * time.Second) - defer t.Stop() - select { - case f := <-st.frc: - return f, nil - case err := <-st.frErrc: - return nil, err - case <-t.C: - return nil, errors.New("timeout waiting for frame") - } -} - -func (st *serverTester) wantHeaders() *HeadersFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) - } - hf, ok := f.(*HeadersFrame) - if !ok { - st.t.Fatalf("got a %T; want *HeadersFrame", f) - } - return hf -} - -func (st *serverTester) wantContinuation() *ContinuationFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) - } - cf, ok := f.(*ContinuationFrame) - if !ok { - st.t.Fatalf("got a %T; want *ContinuationFrame", f) - } - return cf -} - -func (st *serverTester) wantData() *DataFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a DATA frame: %v", err) - } - df, ok := f.(*DataFrame) - if !ok { - st.t.Fatalf("got a %T; want *DataFrame", f) - } - return df -} - -func (st *serverTester) wantSettings() *SettingsFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("got a %T; want *SettingsFrame", f) - } - return sf -} - -func (st *serverTester) wantPing() *PingFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a PING frame: %v", err) - } - pf, ok := f.(*PingFrame) - if !ok { - st.t.Fatalf("got a %T; want *PingFrame", f) - } - return pf -} - -func (st *serverTester) wantGoAway() *GoAwayFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) - } - gf, ok := f.(*GoAwayFrame) - if !ok { - st.t.Fatalf("got a %T; want *GoAwayFrame", f) - } - return gf -} - -func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) - } - rs, ok := f.(*RSTStreamFrame) - if !ok { - st.t.Fatalf("got a %T; want *RSTStreamFrame", f) - } - if rs.FrameHeader.StreamID != streamID { - st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) - } - if rs.ErrCode != errCode { - st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) - } -} - -func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) - } - wu, ok := f.(*WindowUpdateFrame) - if !ok { - st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) - } - if wu.FrameHeader.StreamID != streamID { - st.t.Fatalf("WindowUpdate StreamID = %d; want %d", 
wu.FrameHeader.StreamID, streamID) - } - if wu.Increment != incr { - st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) - } -} - -func (st *serverTester) wantSettingsAck() { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("Wanting a settings ACK, received a %T", f) - } - if !sf.Header().Flags.Has(FlagSettingsAck) { - st.t.Fatal("Settings Frame didn't have ACK set") - } - -} - -func TestServer(t *testing.T) { - gotReq := make(chan bool, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Foo", "Bar") - gotReq <- true - }) - defer st.Close() - - covers("3.5", ` - The server connection preface consists of a potentially empty - SETTINGS frame ([SETTINGS]) that MUST be the first frame the - server sends in the HTTP/2 connection. - `) - - st.writePreface() - st.writeInitialSettings() - st.wantSettings() - st.writeSettingsAck() - st.wantSettingsAck() - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - - select { - case <-gotReq: - case <-time.After(2 * time.Second): - t.Error("timeout waiting for request") - } -} - -func TestServer_Request_Get(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader("foo-bar", "some-value"), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Method != "GET" { - t.Errorf("Method = %q; want GET", r.Method) - } - if r.URL.Path != "/" { - t.Errorf("URL.Path = %q; want /", r.URL.Path) - } - if r.ContentLength != 0 { - t.Errorf("ContentLength = %v; want 0", r.ContentLength) - } - if r.Close { - t.Error("Close = true; want false") - } - if !strings.Contains(r.RemoteAddr, ":") { - t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) - } - if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { - t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) - } - wantHeader := http.Header{ - "Foo-Bar": []string{"some-value"}, - } - if !reflect.DeepEqual(r.Header, wantHeader) { - t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) - } - if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { - t.Errorf("Read = %d, %v; want 0, EOF", n, err) - } - }) -} - -func TestServer_Request_Get_PathSlashes(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":path", "/%2f/"), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.RequestURI != "/%2f/" { - t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) - } - if r.URL.Path != "///" { - t.Errorf("URL.Path = %q; want ///", r.URL.Path) - } - }) -} - -// TODO: add a test with EndStream=true on the HEADERS but setting a -// Content-Length anyway. Should we just omit it and force it to -// zero? 
- -func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != 0 { - t.Errorf("ContentLength = %v; want 0", r.ContentLength) - } - if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { - t.Errorf("Read = %d, %v; want 0, EOF", n, err) - } - }) -} - -func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { - testBodyContents(t, -1, "", func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, nil) // just kidding. empty body. - }) -} - -func TestServer_Request_Post_Body_OneData(t *testing.T) { - const content = "Some content" - testBodyContents(t, -1, content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte(content)) - }) -} - -func TestServer_Request_Post_Body_TwoData(t *testing.T) { - const content = "Some content" - testBodyContents(t, -1, content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, false, []byte(content[:5])) - st.writeData(1, true, []byte(content[5:])) - }) -} - -func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { - const content = "Some content" - testBodyContents(t, int64(len(content)), content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", strconv.Itoa(len(content)), - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte(content)) - }) -} - -func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { - testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", - func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", "3", - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte("12")) - }) -} - -func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { - testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", - func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", "4", - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte("12345")) - }) -} - -func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { - testServerRequest(t, write, func(r *http.Request) { - if 
r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != wantContentLength { - t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) - } - all, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(all) != wantBody { - t.Errorf("Read = %q; want %q", all, wantBody) - } - if err := r.Body.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - }) -} - -func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { - testServerRequest(t, write, func(r *http.Request) { - if r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != wantContentLength { - t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) - } - all, err := ioutil.ReadAll(r.Body) - if err == nil { - t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.", - wantReadError, all) - } - if !strings.Contains(err.Error(), wantReadError) { - t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) - } - if err := r.Body.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - }) -} - -// Using a Host header, instead of :authority -func TestServer_Request_Get_Host(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader("host", host), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Host != host { - t.Errorf("Host = %q; want %q", r.Host, host) - } - }) -} - -// Using an :authority pseudo-header, instead of Host -func TestServer_Request_Get_Authority(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":authority", host), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Host != host { - t.Errorf("Host = %q; want %q", r.Host, host) - } - }) -} - -func TestServer_Request_WithContinuation(t *testing.T) { - wantHeader := http.Header{ - "Foo-One": []string{"value-one"}, - "Foo-Two": []string{"value-two"}, - "Foo-Three": []string{"value-three"}, - } - testServerRequest(t, func(st *serverTester) { - fullHeaders := st.encodeHeader( - "foo-one", "value-one", - "foo-two", "value-two", - "foo-three", "value-three", - ) - remain := fullHeaders - chunks := 0 - for len(remain) > 0 { - const maxChunkSize = 5 - chunk := remain - if len(chunk) > maxChunkSize { - chunk = chunk[:maxChunkSize] - } - remain = remain[len(chunk):] - - if chunks == 0 { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: chunk, - EndStream: true, // no DATA frames - EndHeaders: false, // we'll have continuation frames - }) - } else { - err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) - if err != nil { - t.Fatal(err) - } - } - chunks++ - } - if chunks < 2 { - t.Fatal("too few chunks") - } - }, func(r *http.Request) { - if !reflect.DeepEqual(r.Header, wantHeader) { - t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) - } - }) -} - -// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") -func TestServer_Request_CookieConcat(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.bodylessReq1( - ":authority", host, - "cookie", "a=b", - "cookie", "c=d", - "cookie", "e=f", - ) - }, func(r *http.Request) { - const want = "a=b; c=d; e=f" - if got := r.Header.Get("Cookie"); got != want { - t.Errorf("Cookie = %q; want %q", got, want) - } - }) -} - -func TestServer_Request_Reject_CapitalHeader(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) -} - -func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) -} - -func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid value" ... - testRejectRequest(t, func(st *serverTester) { - st.addLogFilter("duplicate pseudo-header") - st.bodylessReq1(":method", "GET", ":method", "POST") - }) -} - -func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { - // 8.1.2.3 Request Pseudo-Header Fields - // "All pseudo-header fields MUST appear in the header block - // before regular header fields. Any request or response that - // contains a pseudo-header field that appears in a header - // block after a regular header field MUST be treated as - // malformed (Section 8.1.2.6)." - testRejectRequest(t, func(st *serverTester) { - st.addLogFilter("pseudo-header after regular header") - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) - enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) - enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) - enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: buf.Bytes(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) -} - -func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) -} - -func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) -} - -func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { - st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) - st.bodylessReq1(":unknown_thing", "") - }) -} - -func testRejectRequest(t *testing.T, send func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - t.Fatal("server request made it to handler; should've been rejected") - }) - defer st.Close() - - st.greet() - send(st) - st.wantRSTStream(1, ErrCodeProtocol) -} - -func TestServer_Ping(t *testing.T) { - st := newServerTester(t, nil) - defer st.Close() - st.greet() - - // Server should ignore this one, since it has ACK set. - ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} - if err := st.fr.WritePing(true, ackPingData); err != nil { - t.Fatal(err) - } - - // But the server should reply to this one, since ACK is false. 
- pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} - if err := st.fr.WritePing(false, pingData); err != nil { - t.Fatal(err) - } - - pf := st.wantPing() - if !pf.Flags.Has(FlagPingAck) { - t.Error("response ping doesn't have ACK set") - } - if pf.Data != pingData { - t.Errorf("response ping has data %q; want %q", pf.Data, pingData) - } -} - -func TestServer_RejectsLargeFrames(t *testing.T) { - st := newServerTester(t, nil) - defer st.Close() - st.greet() - - // Write too large of a frame (too large by one byte) - // We ignore the return value because it's expected that the server - // will only read the first 9 bytes (the headre) and then disconnect. - st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1)) - - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFrameSize { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) - } -} - -func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { - puppet := newHandlerPuppet() - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - puppet.act(w, r) - }) - defer st.Close() - defer puppet.done() - - st.greet() - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // data coming - EndHeaders: true, - }) - st.writeData(1, false, []byte("abcdef")) - puppet.do(readBodyHandler(t, "abc")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) - - puppet.do(readBodyHandler(t, "def")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) - - st.writeData(1, true, []byte("ghijkl")) // END_STREAM here - puppet.do(readBodyHandler(t, "ghi")) - puppet.do(readBodyHandler(t, "jkl")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM -} - -func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { - st := newServerTester(t, nil) - defer st.Close() - st.greet() - if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { - t.Fatal(err) - } - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFlowControl { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) - } - if gf.LastStreamID != 0 { - t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) - } -} - -func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { - inHandler := make(chan bool) - blockHandler := make(chan bool) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - <-blockHandler - }) - defer st.Close() - defer close(blockHandler) - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - // Send a bogus window update: - if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { - t.Fatal(err) - } - st.wantRSTStream(1, ErrCodeFlowControl) -} - -// testServerPostUnblock sends a hanging POST with unsent data to handler, -// then runs fn once in the handler, and verifies that the error returned from -// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
-func testServerPostUnblock(t *testing.T, - handler func(http.ResponseWriter, *http.Request) error, - fn func(*serverTester), - checkErr func(error), - otherHeaders ...string) { - inHandler := make(chan bool) - errc := make(chan error, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - errc <- handler(w, r) - }) - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - fn(st) - select { - case err := <-errc: - if checkErr != nil { - checkErr(err) - } - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for Handler to return") - } - st.Close() -} - -func TestServer_RSTStream_Unblocks_Read(t *testing.T) { - testServerPostUnblock(t, - func(w http.ResponseWriter, r *http.Request) (err error) { - _, err = r.Body.Read(make([]byte, 1)) - return - }, - func(st *serverTester) { - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }, - func(err error) { - if err == nil { - t.Error("unexpected nil error from Request.Body.Read") - } - }, - ) -} - -func TestServer_DeadConn_Unblocks_Read(t *testing.T) { - testServerPostUnblock(t, - func(w http.ResponseWriter, r *http.Request) (err error) { - _, err = r.Body.Read(make([]byte, 1)) - return - }, - func(st *serverTester) { st.cc.Close() }, - func(err error) { - if err == nil { - t.Error("unexpected nil error from Request.Body.Read") - } - }, - ) -} - -var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { - <-w.(http.CloseNotifier).CloseNotify() - return nil -} - -func TestServer_CloseNotify_After_RSTStream(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }, nil) -} - -func TestServer_CloseNotify_After_ConnClose(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) -} - -// that CloseNotify unblocks after a stream error due to the client's -// problem that's unrelated to them explicitly canceling it (which is -// TestServer_CloseNotify_After_RSTStream above) -func TestServer_CloseNotify_After_StreamError(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { - // data longer than declared Content-Length => stream error - st.writeData(1, true, []byte("1234")) - }, nil, "content-length", "3") -} - -func TestServer_StateTransitions(t *testing.T) { - var st *serverTester - inHandler := make(chan bool) - writeData := make(chan bool) - leaveHandler := make(chan bool) - st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - if st.stream(1) == nil { - t.Errorf("nil stream 1 in handler") - } - if got, want := st.streamState(1), stateOpen; got != want { - t.Errorf("in handler, state is %v; want %v", got, want) - } - writeData <- true - if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { - t.Errorf("body read = %d, %v; want 0, EOF", n, err) - } - if got, want := st.streamState(1), stateHalfClosedRemote; got != want { - t.Errorf("in handler, state is %v; want %v", got, want) - } - - <-leaveHandler - }) - st.greet() - if st.stream(1) != nil { - t.Fatal("stream 1 should be empty") - } - if got := st.streamState(1); got != stateIdle { - t.Fatalf("stream 1 should be idle; got %v", got) - } - - st.writeHeaders(HeadersFrameParam{ - 
StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - <-writeData - st.writeData(1, true, nil) - - leaveHandler <- true - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("expected END_STREAM flag") - } - - if got, want := st.streamState(1), stateClosed; got != want { - t.Errorf("at end, state is %v; want %v", got, want) - } - if st.stream(1) != nil { - t.Fatal("at end, stream 1 should be gone") - } -} - -// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) -func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - st.writeHeaders(HeadersFrameParam{ // Not a continuation. - StreamID: 3, // different stream. - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -// test HEADERS w/o EndHeaders + PING (should get rejected) -func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WritePing(false, [8]byte{}); err != nil { - t.Fatal(err) - } - }) -} - -// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) -func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } - }) -} - -// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID -func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } - }) -} - -// No HEADERS on stream 0. -func TestServer_Rejects_Headers0(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writeHeaders(HeadersFrameParam{ - StreamID: 0, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -// No CONTINUATION on stream 0. -func TestServer_Rejects_Continuation0(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { - t.Fatal(err) - } - }) -} - -func TestServer_Rejects_PushPromise(t *testing.T) { - testServerRejects(t, func(st *serverTester) { - pp := PushPromiseParam{ - StreamID: 1, - PromiseID: 3, - } - if err := st.fr.WritePushPromise(pp); err != nil { - t.Fatal(err) - } - }) -} - -// testServerRejects tests that the server hangs up with a GOAWAY -// frame and a server close after the client does something -// deserving a CONNECTION_ERROR. 
-func testServerRejects(t *testing.T, writeReq func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) - st.addLogFilter("connection error: PROTOCOL_ERROR") - defer st.Close() - st.greet() - writeReq(st) - - st.wantGoAway() - errc := make(chan error, 1) - go func() { - fr, err := st.fr.ReadFrame() - if err == nil { - err = fmt.Errorf("got frame of type %T", fr) - } - errc <- err - }() - select { - case err := <-errc: - if err != io.EOF { - t.Errorf("ReadFrame = %v; want io.EOF", err) - } - case <-time.After(2 * time.Second): - t.Error("timeout waiting for disconnect") - } -} - -// testServerRequest sets up an idle HTTP/2 connection and lets you -// write a single request with writeReq, and then verify that the -// *http.Request is built correctly in checkReq. -func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { - gotReq := make(chan bool, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - if r.Body == nil { - t.Fatal("nil Body") - } - checkReq(r) - gotReq <- true - }) - defer st.Close() - - st.greet() - writeReq(st) - - select { - case <-gotReq: - case <-time.After(2 * time.Second): - t.Error("timeout waiting for request") - } -} - -func getSlash(st *serverTester) { st.bodylessReq1() } - -func TestServer_Response_NoData(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - // Nothing. - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - }) -} - -func TestServer_Response_NoData_Header_FooBar(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Foo-Bar", "some-value") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo-bar", "some-value"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { - const msg = "this is HTML." 
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "foo/bar") - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't want END_STREAM, expecting data") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "foo/bar"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - }) -} - -func TestServer_Response_TransferEncoding_chunked(t *testing.T) { - const msg = "hi" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Transfer-Encoding", "chunked") // should be stripped - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -// Header accessed only after the initial write. -func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { - const msg = "this is HTML." - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - w.Header().Set("foo", "should be ignored") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -// Header accessed before the initial write and later mutated. -func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { - const msg = "this is HTML." - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("foo", "proper value") - io.WriteString(w, msg) - w.Header().Set("foo", "should be ignored") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo", "proper value"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -func TestServer_Response_Data_SniffLenType(t *testing.T) { - const msg = "this is HTML." 
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't want END_STREAM, expecting data") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - }) -} - -func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { - const msg = "this is HTML" - const msg2 = ", and this is the next chunk" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - w.(http.Flusher).Flush() - io.WriteString(w, msg2) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - { - df := st.wantData() - if df.StreamEnded() { - t.Error("unexpected END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - } - { - df := st.wantData() - if !df.StreamEnded() { - t.Error("wanted END_STREAM flag on last data chunk") - } - if got := string(df.Data()); got != msg2 { - t.Errorf("got DATA %q; want %q", got, msg2) - } - } - }) -} - -func TestServer_Response_LargeWrite(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - n, err := w.Write(bytes.Repeat([]byte("a"), size)) - if err != nil { - return fmt.Errorf("Write error: %v", err) - } - if n != size { - return fmt.Errorf("wrong size %d from Write", n) - } - return nil - }, func(st *serverTester) { - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, 0}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - // Give the handler quota to write: - if err := st.fr.WriteWindowUpdate(1, size); err != nil { - t.Fatal(err) - } - // Give the handler quota to write to connection-level - // window as well - if err := st.fr.WriteWindowUpdate(0, size); err != nil { - t.Fatal(err) - } - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - var bytes, frames int - for { - df := st.wantData() - bytes += len(df.Data()) - frames++ - for _, b := range df.Data() { - if b != 'a' { - t.Fatal("non-'a' byte seen in 
DATA") - } - } - if df.StreamEnded() { - break - } - } - if bytes != size { - t.Errorf("Got %d bytes; want %d", bytes, size) - } - if want := int(size / maxFrameSize); frames < want || frames > want*2 { - t.Errorf("Got %d frames; want %d", frames, size) - } - }) -} - -// Test that the handler can't write more than the client allows -func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - n, err := w.Write(bytes.Repeat([]byte("a"), size)) - if err != nil { - return fmt.Errorf("Write error: %v", err) - } - if n != size { - return fmt.Errorf("wrong size %d from Write", n) - } - return nil - }, func(st *serverTester) { - // Set the window size to something explicit for this test. - // It's also how much initial data we expect. - const initWindowSize = 123 - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, initWindowSize}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }() - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - df := st.wantData() - if got := len(df.Data()); got != initWindowSize { - t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got) - } - - for _, quota := range []int{1, 13, 127} { - if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { - t.Fatal(err) - } - df := st.wantData() - if int(quota) != len(df.Data()) { - t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) - } - } - - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }) -} - -// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. 
-func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - errc := make(chan error, 1) - go func() { - _, err := w.Write(bytes.Repeat([]byte("a"), size)) - errc <- err - }() - select { - case err := <-errc: - if err == nil { - return errors.New("unexpected nil error from Write in handler") - } - return nil - case <-time.After(2 * time.Second): - return errors.New("timeout waiting for Write in handler") - } - }, func(st *serverTester) { - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, 0}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }) -} - -func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - // Nothing; send empty DATA - return nil - }, func(st *serverTester) { - // Handler gets no data quota: - if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - df := st.wantData() - if got := len(df.Data()); got != 0 { - t.Fatalf("unexpected %d DATA bytes; want 0", got) - } - if !df.StreamEnded() { - t.Fatal("DATA didn't have END_STREAM") - } - }) -} - -func TestServer_Response_Automatic100Continue(t *testing.T) { - const msg = "foo" - const reply = "bar" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - if v := r.Header.Get("Expect"); v != "" { - t.Errorf("Expect header = %q; want empty", v) - } - buf := make([]byte, len(msg)) - // This read should trigger the 100-continue being sent. - if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { - return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) - } - _, err := io.WriteString(w, reply) - return err - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), - EndStream: false, - EndHeaders: true, - }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := decodeHeader(t, hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "100"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Fatalf("Got headers %v; want %v", goth, wanth) - } - - // Okay, they sent status 100, so we can send our - // gigantic and/or sensitive "foo" payload now. 
- st.writeData(1, true, []byte(msg)) - - st.wantWindowUpdate(0, uint32(len(msg))) - - hf = st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("expected data to follow") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth = decodeHeader(t, hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(reply))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - - df := st.wantData() - if string(df.Data()) != reply { - t.Errorf("Client read %q; want %q", df.Data(), reply) - } - if !df.StreamEnded() { - t.Errorf("expect data stream end") - } - }) -} - -func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { - errc := make(chan error, 1) - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - p := []byte("some data.\n") - for { - _, err := w.Write(p) - if err != nil { - errc <- err - return nil - } - } - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: false, - EndHeaders: true, - }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - // Close the connection and wait for the handler to (hopefully) notice. - st.cc.Close() - select { - case <-errc: - case <-time.After(5 * time.Second): - t.Error("timeout") - } - }) -} - -func TestServer_Rejects_Too_Many_Streams(t *testing.T) { - const testPath = "/some/path" - - inHandler := make(chan uint32) - leaveHandler := make(chan bool) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - id := w.(*responseWriter).rws.stream.id - inHandler <- id - if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { - t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) - } - <-leaveHandler - }) - defer st.Close() - st.greet() - nextStreamID := uint32(1) - streamID := func() uint32 { - defer func() { nextStreamID += 2 }() - return nextStreamID - } - sendReq := func(id uint32, headers ...string) { - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(headers...), - EndStream: true, - EndHeaders: true, - }) - } - for i := 0; i < defaultMaxStreams; i++ { - sendReq(streamID()) - <-inHandler - } - defer func() { - for i := 0; i < defaultMaxStreams; i++ { - leaveHandler <- true - } - }() - - // And this one should cross the limit: - // (It's also sent as a CONTINUATION, to verify we still track the decoder context, - // even if we're rejecting it) - rejectID := streamID() - headerBlock := st.encodeHeader(":path", testPath) - frag1, frag2 := headerBlock[:3], headerBlock[3:] - st.writeHeaders(HeadersFrameParam{ - StreamID: rejectID, - BlockFragment: frag1, - EndStream: true, - EndHeaders: false, // CONTINUATION coming - }) - if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { - t.Fatal(err) - } - st.wantRSTStream(rejectID, ErrCodeProtocol) - - // But let a handler finish: - leaveHandler <- true - st.wantHeaders() - - // And now another stream should be able to start: - goodID := streamID() - sendReq(goodID, ":path", testPath) - select { - case got := <-inHandler: - if got != goodID { - t.Errorf("Got stream %d; want %d", got, goodID) - } - case <-time.After(3 * time.Second): - t.Error("timeout waiting for handler") - } -} - -// So many response headers that the server needs to use CONTINUATION frames: -func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - h := w.Header() - for i := 0; i < 5000; i++ { - h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) - } - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.HeadersEnded() { - t.Fatal("got unwanted END_HEADERS flag") - } - n := 0 - for { - n++ - cf := st.wantContinuation() - if cf.HeadersEnded() { - break - } - } - if n < 5 { - t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) - } - }) -} - -// This previously crashed (reported by Mathieu Lonjaret as observed -// while using Camlistore) because we got a DATA frame from the client -// after the handler exited and our logic at the time was wrong, -// keeping a stream in the map in stateClosed, which tickled an -// invariant check later when we tried to remove that stream (via -// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop -// ended. -func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - // nothing - return nil - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: false, // DATA is coming - EndHeaders: true, - }) - hf := st.wantHeaders() - if !hf.HeadersEnded() || !hf.StreamEnded() { - t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) - } - - // Sent when the a Handler closes while a client has - // indicated it's still sending DATA: - st.wantRSTStream(1, ErrCodeCancel) - - // Now the handler has ended, so it's ended its - // stream, but the client hasn't closed its side - // (stateClosedLocal). So send more data and verify - // it doesn't crash with an internal invariant panic, like - // it did before. - st.writeData(1, true, []byte("foo")) - - // Sent after a peer sends data anyway (admittedly the - // previous RST_STREAM might've still been in-flight), - // but they'll get the more friendly 'cancel' code - // first. - st.wantRSTStream(1, ErrCodeStreamClosed) - - // Set up a bunch of machinery to record the panic we saw - // previously. - var ( - panMu sync.Mutex - panicVal interface{} - ) - - testHookOnPanicMu.Lock() - testHookOnPanic = func(sc *serverConn, pv interface{}) bool { - panMu.Lock() - panicVal = pv - panMu.Unlock() - return true - } - testHookOnPanicMu.Unlock() - - // Now force the serve loop to end, via closing the connection. - st.cc.Close() - select { - case <-st.sc.doneServing: - // Loop has exited. 
- panMu.Lock() - got := panicVal - panMu.Unlock() - if got != nil { - t.Errorf("Got panic: %v", got) - } - case <-time.After(5 * time.Second): - t.Error("timeout") - } - }) -} - -func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } -func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } - -func testRejectTLS(t *testing.T, max uint16) { - st := newServerTester(t, nil, func(c *tls.Config) { - c.MaxVersion = max - }) - defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } -} - -func TestServer_Rejects_TLSBadCipher(t *testing.T) { - st := newServerTester(t, nil, func(c *tls.Config) { - // Only list bad ones: - c.CipherSuites = []uint16{ - tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - } - }) - defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } -} - -func TestServer_Advertises_Common_Cipher(t *testing.T) { - const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - st := newServerTester(t, nil, func(c *tls.Config) { - // Have the client only support the one required by the spec. - c.CipherSuites = []uint16{requiredSuite} - }, func(ts *httptest.Server) { - var srv *http.Server = ts.Config - // Have the server configured with one specific cipher suite - // which is banned. This tests that ConfigureServer ends up - // adding the good one to this list. - srv.TLSConfig = &tls.Config{ - CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, // just a banned one - } - }) - defer st.Close() - st.greet() -} - -// TODO: move this onto *serverTester, and re-use the same hpack -// decoding context throughout. We're just getting lucky here with -// creating a new decoder each time. 
-func decodeHeader(t *testing.T, headerBlock []byte) (pairs [][2]string) { - d := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { - pairs = append(pairs, [2]string{f.Name, f.Value}) - }) - if _, err := d.Write(headerBlock); err != nil { - t.Fatalf("hpack decoding error: %v", err) - } - if err := d.Close(); err != nil { - t.Fatalf("hpack decoding error: %v", err) - } - return -} - -// testServerResponse sets up an idle HTTP/2 connection and lets you -// write a single request with writeReq, and then reply to it in some way with the provided handler, -// and then verify the output with the serverTester again (assuming the handler returns nil) -func testServerResponse(t testing.TB, - handler func(http.ResponseWriter, *http.Request) error, - client func(*serverTester), -) { - errc := make(chan error, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - if r.Body == nil { - t.Fatal("nil Body") - } - errc <- handler(w, r) - }) - defer st.Close() - - donec := make(chan bool) - go func() { - defer close(donec) - st.greet() - client(st) - }() - - select { - case <-donec: - return - case <-time.After(5 * time.Second): - t.Fatal("timeout") - } - - select { - case err := <-errc: - if err != nil { - t.Fatalf("Error in handler: %v", err) - } - case <-time.After(2 * time.Second): - t.Error("timeout waiting for handler to finish") - } -} - -// readBodyHandler returns an http Handler func that reads len(want) -// bytes from r.Body and fails t if the contents read were not -// the value of want. -func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - buf := make([]byte, len(want)) - _, err := io.ReadFull(r.Body, buf) - if err != nil { - t.Error(err) - return - } - if string(buf) != want { - t.Errorf("read %q; want %q", buf, want) - } - } -} - -// TestServerWithCurl currently fails, hence the LenientCipherSuites test. 
See: -// https://github.com/tatsuhiro-t/nghttp2/issues/140 & -// http://sourceforge.net/p/curl/bugs/1472/ -func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } -func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } - -func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { - if runtime.GOOS != "linux" { - t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") - } - requireCurl(t) - const msg = "Hello from curl!\n" - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Foo", "Bar") - w.Header().Set("Client-Proto", r.Proto) - io.WriteString(w, msg) - })) - ConfigureServer(ts.Config, &Server{ - PermitProhibitedCipherSuites: permitProhibitedCipherSuites, - }) - ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config - ts.StartTLS() - defer ts.Close() - - var gotConn int32 - testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } - - t.Logf("Running test server for curl to hit at: %s", ts.URL) - container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) - defer kill(container) - resc := make(chan interface{}, 1) - go func() { - res, err := dockerLogs(container) - if err != nil { - resc <- err - } else { - resc <- res - } - }() - select { - case res := <-resc: - if err, ok := res.(error); ok { - t.Fatal(err) - } - if !strings.Contains(string(res.([]byte)), "foo: Bar") { - t.Errorf("didn't see foo: Bar header") - t.Logf("Got: %s", res) - } - if !strings.Contains(string(res.([]byte)), "client-proto: HTTP/2") { - t.Errorf("didn't see client-proto: HTTP/2 header") - t.Logf("Got: %s", res) - } - if !strings.Contains(string(res.([]byte)), msg) { - t.Errorf("didn't see %q content", msg) - t.Logf("Got: %s", res) - } - case <-time.After(3 * time.Second): - t.Errorf("timeout waiting for curl") - } - - if atomic.LoadInt32(&gotConn) == 0 { - t.Error("never saw an http2 connection") - } -} - -func BenchmarkServerGets(b *testing.B) { - b.ReportAllocs() - - const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, msg) - }) - defer st.Close() - st.greet() - - // Give the server quota to reply. (plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - - for i := 0; i < b.N; i++ { - id := 1 + uint32(i)*2 - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } - } -} - -func BenchmarkServerPosts(b *testing.B) { - b.ReportAllocs() - - const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, msg) - }) - defer st.Close() - st.greet() - - // Give the server quota to reply. 
(plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - - for i := 0; i < b.N; i++ { - id := 1 + uint32(i)*2 - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, - EndHeaders: true, - }) - st.writeData(id, true, nil) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml b/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml deleted file mode 100644 index 31a84bed4f..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml +++ /dev/null @@ -1,5021 +0,0 @@ - - - - - - - - - - - - - - - - - - - Hypertext Transfer Protocol version 2 - - - Twist -
- mbelshe@chromium.org -
-
- - - Google, Inc -
- fenix@google.com -
-
- - - Mozilla -
- - 331 E Evelyn Street - Mountain View - CA - 94041 - US - - martin.thomson@gmail.com -
-
- - - Applications - HTTPbis - HTTP - SPDY - Web - - - - This specification describes an optimized expression of the semantics of the Hypertext - Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a - reduced perception of latency by introducing header field compression and allowing multiple - concurrent messages on the same connection. It also introduces unsolicited push of - representations from servers to clients. - - - This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. - HTTP's existing semantics remain unchanged. - - - - - - Discussion of this draft takes place on the HTTPBIS working group mailing list - (ietf-http-wg@w3.org), which is archived at . - - - Working Group information can be found at ; that specific to HTTP/2 are at . - - - The changes in this draft are summarized in . - - - -
- - -
- - - The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the - HTTP/1.1 message format () has - several characteristics that have a negative overall effect on application performance - today. - - - In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given - TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed - request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 - clients that need to make many requests typically use multiple connections to a server in - order to achieve concurrency and thereby reduce latency. - - - Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary - network traffic, as well as causing the initial TCP congestion - window to quickly fill. This can result in excessive latency when multiple requests are - made on a new TCP connection. - - - HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an - underlying connection. Specifically, it allows interleaving of request and response - messages on the same connection and uses an efficient coding for HTTP header fields. It - also allows prioritization of requests, letting more important requests complete more - quickly, further improving performance. - - - The resulting protocol is more friendly to the network, because fewer TCP connections can - be used in comparison to HTTP/1.x. This means less competition with other flows, and - longer-lived connections, which in turn leads to better utilization of available network - capacity. - - - Finally, HTTP/2 also enables more efficient processing of messages through use of binary - message framing. - -
- -
- - HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core - features of HTTP/1.1, but aims to be more efficient in several ways. - - - The basic protocol unit in HTTP/2 is a frame. Each frame - type serves a different purpose. For example, HEADERS and - DATA frames form the basis of HTTP requests and - responses; other frame types like SETTINGS, - WINDOW_UPDATE, and PUSH_PROMISE are used in support of other - HTTP/2 features. - - - Multiplexing of requests is achieved by having each HTTP request-response exchange - associated with its own stream. Streams are largely - independent of each other, so a blocked or stalled request or response does not prevent - progress on other streams. - - - Flow control and prioritization ensure that it is possible to efficiently use multiplexed - streams. Flow control helps to ensure that only data that - can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed - to the most important streams first. - - - HTTP/2 adds a new interaction mode, whereby a server can push - responses to a client. Server push allows a server to speculatively send a client - data that the server anticipates the client will need, trading off some network usage - against a potential latency gain. The server does this by synthesizing a request, which it - sends as a PUSH_PROMISE frame. The server is then able to send a response to - the synthetic request on a separate stream. - - - Frames that contain HTTP header fields are compressed. - HTTP requests can be highly redundant, so compression can reduce the size of requests and - responses significantly. - - -
- - The HTTP/2 specification is split into four parts: - - - Starting HTTP/2 covers how an HTTP/2 connection is - initiated. - - - The framing and streams layers describe the way HTTP/2 frames are - structured and formed into multiplexed streams. - - - Frame and error - definitions include details of the frame and error types used in HTTP/2. - - - HTTP mappings and additional - requirements describe how HTTP semantics are expressed using frames and - streams. - - - - - While some of the frame and stream layer concepts are isolated from HTTP, this - specification does not define a completely generic framing layer. The framing and streams - layers are tailored to the needs of the HTTP protocol and server push. - -
- -
- - The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD - NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as - described in RFC 2119. - - - All numeric values are in network byte order. Values are unsigned unless otherwise - indicated. Literal values are provided in decimal or hexadecimal as appropriate. - Hexadecimal literals are prefixed with 0x to distinguish them - from decimal literals. - - - The following terms are used: - - - The endpoint initiating the HTTP/2 connection. - - - A transport-layer connection between two endpoints. - - - An error that affects the entire HTTP/2 connection. - - - Either the client or server of the connection. - - - The smallest unit of communication within an HTTP/2 connection, consisting of a header - and a variable-length sequence of octets structured according to the frame type. - - - An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint - that is remote to the primary subject of discussion. - - - An endpoint that is receiving frames. - - - An endpoint that is transmitting frames. - - - The endpoint which did not initiate the HTTP/2 connection. - - - A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. - - - An error on the individual HTTP/2 stream. - - - - - Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined - in . - -
-
- -
- - An HTTP/2 connection is an application layer protocol running on top of a TCP connection - (). The client is the TCP connection initiator. - - - HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same - default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, - implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the - upstream server (the immediate peer to which the client wishes to establish a connection) - supports HTTP/2. - - - - The means by which support for HTTP/2 is determined is different for "http" and "https" - URIs. Discovery for "http" URIs is described in . Discovery - for "https" URIs is described in . - - -
- - The protocol defined in this document has two identifiers. - - - - The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) - field and any place that HTTP/2 over TLS is identified. - - - The "h2" string is serialized into an ALPN protocol identifier as the two octet - sequence: 0x68, 0x32. - - - - - The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. - This identifier is used in the HTTP/1.1 Upgrade header field and any place that - HTTP/2 over TCP is identified. - - - - - - Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message - semantics described in this document. - - - RFC Editor's Note: please remove the remainder of this section prior to the - publication of a final version of this document. - - - Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". - Until such an RFC exists, implementations MUST NOT identify themselves using these - strings. - - - Examples and text throughout the rest of this document use "h2" as a matter of - editorial convenience only. Implementations of draft versions MUST NOT identify using - this string. - - - Implementations of draft versions of the protocol MUST add the string "-" and the - corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 - over TLS is identified using the string "h2-11". - - - Non-compatible experiments that are based on these draft versions MUST append the string - "-" and an experiment name to the identifier. For example, an experimental implementation - of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself - as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in - . Experimenters are - encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. - -
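A minimal sketch of how the "h2" identifier above is typically advertised from Go using the standard crypto/tls ALPN support; the host name is a placeholder, and per the editor's note a draft implementation would advertise a draft-suffixed token rather than the final "h2" string.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		// "h2" selects HTTP/2 over TLS; "http/1.1" is the fallback.
		NextProtos: []string{"h2", "http/1.1"},
	})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println("negotiated protocol:", conn.ConnectionState().NegotiatedProtocol)
}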
- -
- - A client that makes a request for an "http" URI without prior knowledge about support for - HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade - header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include - exactly one HTTP2-Settings header field. - -
- For example: - -GET / HTTP/1.1 -Host: server.example.com -Connection: Upgrade, HTTP2-Settings -Upgrade: h2c -HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload> - -
- - Requests that contain an entity body MUST be sent in their entirety before the client can - send HTTP/2 frames. This means that a large request entity can block the use of the - connection until it is completely sent. - - - If concurrency of an initial request with subsequent requests is important, an OPTIONS - request can be used to perform the upgrade to HTTP/2, at the cost of an additional - round-trip. - - - A server that does not support HTTP/2 can respond to the request as though the Upgrade - header field were absent: - -
- -HTTP/1.1 200 OK -Content-Length: 243 -Content-Type: text/html - -... - -
- - A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with - "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . - - - A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) - response. After the empty line that terminates the 101 response, the server can begin - sending HTTP/2 frames. These frames MUST include a response to the request that initiated - the Upgrade. - - -
- - For example: - - -HTTP/1.1 101 Switching Protocols -Connection: Upgrade -Upgrade: h2c - -[ HTTP/2 connection ... - -
- - The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a - SETTINGS frame. - - - The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is - assigned default priority values. Stream 1 is - implicitly half closed from the client toward the server, since the request is completed - as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the - response. - - -
- - A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field - that includes parameters that govern the HTTP/2 connection, provided in anticipation of - the server accepting the request to upgrade. - -
- -
- - A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, - or if more than one is present. A server MUST NOT send this header field. - - - - The content of the HTTP2-Settings header field is the - payload of a SETTINGS frame (), encoded as a - base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The - ABNF production for token68 is - defined in . - - - Since the upgrade is only intended to apply to the immediate connection, a client - sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded - downstream. - - - A server decodes and interprets these values as it would any other - SETTINGS frame. Acknowledgement of the - SETTINGS parameters is not necessary, since a 101 response serves as implicit - acknowledgment. Providing these values in the Upgrade request gives a client an - opportunity to provide parameters prior to receiving any frames from the server. - -
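A short sketch of producing an HTTP2-Settings header value as described above. It assumes the SETTINGS payload wire format defined later in this document (a 16-bit identifier followed by a 32-bit value per setting); the specific identifiers and values are illustrative only, and RawURLEncoding gives the padding-free base64url form the text requires.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	type setting struct {
		ID  uint16
		Val uint32
	}
	settings := []setting{
		{ID: 0x3, Val: 100},   // illustrative: a max-concurrent-streams setting
		{ID: 0x4, Val: 65535}, // illustrative: an initial-window-size setting
	}
	payload := make([]byte, 0, 6*len(settings))
	for _, s := range settings {
		var entry [6]byte
		binary.BigEndian.PutUint16(entry[0:2], s.ID)
		binary.BigEndian.PutUint32(entry[2:6], s.Val)
		payload = append(payload, entry[:]...)
	}
	// RawURLEncoding omits the trailing '=' padding, as required above.
	fmt.Println("HTTP2-Settings:", base64.RawURLEncoding.EncodeToString(payload))
}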
-
- -
- - A client that makes a request to an "https" URI uses TLS - with the application layer protocol negotiation extension. - - - HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a - client or selected by a server. - - - Once TLS negotiation is complete, both the client and the server send a connection preface. - -
- -
- - A client can learn that a particular server supports HTTP/2 by other means. For example, - describes a mechanism for advertising this capability. - - - A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, - after the connection preface; a server can - identify such a connection by the presence of the connection preface. This only affects - the establishment of HTTP/2 connections over cleartext TCP; implementations that support - HTTP/2 over TLS MUST use protocol negotiation in TLS. - - - Without additional information, prior support for HTTP/2 is not a strong signal that a - given server will support HTTP/2 for future connections. For example, it is possible for - server configurations to change, for configurations to differ between instances in - clustered servers, or for network conditions to change. - -
- -
- - Upon establishment of a TCP connection and determination that HTTP/2 will be used by both - peers, each endpoint MUST send a connection preface as a final confirmation and to - establish the initial SETTINGS parameters for the HTTP/2 connection. The client and - server each send a different connection preface. - - - The client connection preface starts with a sequence of 24 octets, which in hex notation - are: - -
- 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a -
- - (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence - is followed by a SETTINGS frame (). The - SETTINGS frame MAY be empty. The client sends the client connection - preface immediately upon receipt of a 101 Switching Protocols response (indicating a - successful upgrade), or as the first application data octets of a TLS connection. If - starting an HTTP/2 connection with prior knowledge of server support for the protocol, the - client connection preface is sent upon connection establishment. - - - - - The client connection preface is selected so that a large proportion of HTTP/1.1 or - HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note - that this does not address the concerns raised in . - - - - - The server connection preface consists of a potentially empty SETTINGS - frame () that MUST be the first frame the server sends in the - HTTP/2 connection. - - - The SETTINGS frames received from a peer as part of the connection preface - MUST be acknowledged (see ) after sending the connection - preface. - - - To avoid unnecessary latency, clients are permitted to send additional frames to the - server immediately after sending the client connection preface, without waiting to receive - the server connection preface. It is important to note, however, that the server - connection preface SETTINGS frame might include parameters that necessarily - alter how a client is expected to communicate with the server. Upon receiving the - SETTINGS frame, the client is expected to honor any parameters established. - In some configurations, it is possible for the server to transmit SETTINGS - before the client sends additional frames, providing an opportunity to avoid this issue. - - - Clients and servers MUST treat an invalid connection preface as a connection error of type - PROTOCOL_ERROR. A GOAWAY frame () - MAY be omitted in this case, since an invalid preface indicates that the peer is not using - HTTP/2. - -
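A brief sketch of sending the client connection preface described above, followed by the (possibly empty) SETTINGS frame, using the golang.org/x/net/http2 Framer that this change vendors; sendPreface is a made-up helper and error handling is minimal.

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/http2"
)

const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

// sendPreface writes the 24-octet preface and then the SETTINGS frame that
// completes the client connection preface.
func sendPreface(conn net.Conn) error {
	if _, err := conn.Write([]byte(clientPreface)); err != nil {
		return err
	}
	fr := http2.NewFramer(conn, conn)
	// The SETTINGS frame that follows the preface MAY be empty.
	return fr.WriteSettings()
}

func main() {
	fmt.Println(len(clientPreface), "preface octets:", []byte(clientPreface))
}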
-
- -
- - Once the HTTP/2 connection is established, endpoints can begin exchanging frames. - - -
- - All frames begin with a fixed 9-octet header followed by a variable-length payload. - -
- -
- - The fields of the frame header are defined as: - - Length: The length of the frame payload expressed as an unsigned 24-bit integer. Values greater than 2^14 (16,384) MUST NOT be sent unless the receiver has set a larger value for SETTINGS_MAX_FRAME_SIZE. The 9 octets of the frame header are not included in this value. - - Type: The 8-bit type of the frame. The frame type determines the format and semantics of the frame. Implementations MUST ignore and discard any frame that has a type that is unknown. - - Flags: An 8-bit field reserved for frame-type specific boolean flags. Flags are assigned semantics specific to the indicated frame type. Flags that have no defined semantics for a particular frame type MUST be ignored, and MUST be left unset (0) when sending. - - R: A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST remain unset (0) when sending and MUST be ignored when receiving. - - Stream Identifier: A 31-bit stream identifier (see ). The value 0 is reserved for frames that are associated with the connection as a whole as opposed to an individual stream. - - The structure and content of the frame payload is dependent entirely on the frame type. -
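A small sketch of packing and unpacking the fixed 9-octet frame header laid out above (24-bit length, 8-bit type, 8-bit flags, 1 reserved bit plus 31-bit stream identifier) with plain byte manipulation; frameHeader is a made-up type, not the vendored package's own representation.

package main

import "fmt"

type frameHeader struct {
	Length   uint32 // 24 bits on the wire
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits; the high bit is reserved
}

func (h frameHeader) encode() [9]byte {
	var b [9]byte
	b[0] = byte(h.Length >> 16)
	b[1] = byte(h.Length >> 8)
	b[2] = byte(h.Length)
	b[3] = h.Type
	b[4] = h.Flags
	id := h.StreamID & 0x7fffffff // reserved bit stays unset
	b[5] = byte(id >> 24)
	b[6] = byte(id >> 16)
	b[7] = byte(id >> 8)
	b[8] = byte(id)
	return b
}

func parse(b [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
		Type:     b[3],
		Flags:    b[4],
		StreamID: (uint32(b[5])<<24 | uint32(b[6])<<16 | uint32(b[7])<<8 | uint32(b[8])) & 0x7fffffff,
	}
}

func main() {
	h := frameHeader{Length: 8, Type: 0x0 /* DATA */, Flags: 0x1 /* END_STREAM */, StreamID: 1}
	fmt.Printf("%+v\n", parse(h.encode()))
}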
- -
- - The size of a frame payload is limited by the maximum size that a receiver advertises in the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value between 2^14 (16,384) and 2^24-1 (16,777,215) octets, inclusive. - - All implementations MUST be capable of receiving and minimally processing frames up to 2^14 octets in length, plus the 9-octet frame header. The size of the frame header is not included when describing frame sizes. - - Certain frame types, such as PING, impose additional limits on the amount of payload data allowed. - - If a frame size exceeds any defined limit, or is too small to contain mandatory frame data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying a header block (that is, HEADERS, PUSH_PROMISE, and CONTINUATION), SETTINGS, and any WINDOW_UPDATE frame with a stream identifier of 0. - - Endpoints are not obligated to use all available space in a frame. Responsiveness can be improved by using frames that are smaller than the permitted maximum size. Sending large frames can result in delays in sending time-sensitive frames (such as RST_STREAM, WINDOW_UPDATE, or PRIORITY) which, if blocked by the transmission of a large frame, could affect performance. -
- -
- - Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. - They are used within HTTP request and response messages as well as server push operations - (see ). - - - Header lists are collections of zero or more header fields. When transmitted over a - connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then - divided into one or more octet sequences, called header block fragments, and transmitted - within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. - - - The Cookie header field is treated specially by the HTTP - mapping (see ). - - - A receiving endpoint reassembles the header block by concatenating its fragments, then - decompresses the block to reconstruct the header list. - - - A complete header block consists of either: - - - a single HEADERS or PUSH_PROMISE frame, - with the END_HEADERS flag set, or - - - a HEADERS or PUSH_PROMISE frame with the END_HEADERS - flag cleared and one or more CONTINUATION frames, - where the last CONTINUATION frame has the END_HEADERS flag set. - - - - - Header compression is stateful. One compression context and one decompression context is - used for the entire connection. Each header block is processed as a discrete unit. - Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved - frames of any other type or from any other stream. The last frame in a sequence of - HEADERS or CONTINUATION frames MUST have the END_HEADERS - flag set. The last frame in a sequence of PUSH_PROMISE or - CONTINUATION frames MUST have the END_HEADERS flag set. This allows a - header block to be logically equivalent to a single frame. - - - Header block fragments can only be sent as the payload of HEADERS, - PUSH_PROMISE or CONTINUATION frames, because these frames - carry data that can modify the compression context maintained by a receiver. An endpoint - receiving HEADERS, PUSH_PROMISE or - CONTINUATION frames MUST reassemble header blocks and perform decompression - even if the frames are to be discarded. A receiver MUST terminate the connection with a - connection error of type - COMPRESSION_ERROR if it does not decompress a header block. - -
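A short sketch of serializing a header list into a header block with the vendored golang.org/x/net/http2/hpack encoder and then slicing it into the fragments that would ride in a HEADERS frame followed by CONTINUATION frames; the 16-byte fragment size is arbitrary and stands in for whatever frame size limit applies.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block) // one compression context per connection
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "example"})

	const fragSize = 16 // illustrative fragment size only
	frags := 0
	for buf := block.Bytes(); len(buf) > 0; frags++ {
		n := fragSize
		if n > len(buf) {
			n = len(buf)
		}
		_ = buf[:n] // this fragment goes in HEADERS (first) or a CONTINUATION
		buf = buf[n:]
	}
	fmt.Println("header block split into", frags, "fragment(s)")
}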
-
- -
- - A "stream" is an independent, bi-directional sequence of frames exchanged between the client - and server within an HTTP/2 connection. Streams have several important characteristics: - - - A single HTTP/2 connection can contain multiple concurrently open streams, with either - endpoint interleaving frames from multiple streams. - - - Streams can be established and used unilaterally or shared by either the client or - server. - - - Streams can be closed by either endpoint. - - - The order in which frames are sent on a stream is significant. Recipients process frames - in the order they are received. In particular, the order of HEADERS, - and DATA frames is semantically significant. - - - Streams are identified by an integer. Stream identifiers are assigned to streams by the - endpoint initiating the stream. - - - - -
- - The lifecycle of a stream is shown in . - - -
- [Figure: stream state diagram showing the idle, reserved (local), reserved (remote), open, half closed (local), half closed (remote), and closed states and the transitions between them] - - H: HEADERS frame (with implied CONTINUATIONs) - PP: PUSH_PROMISE frame (with implied CONTINUATIONs) - ES: END_STREAM flag - R: RST_STREAM frame -
- - - Note that this diagram shows stream state transitions and the frames and flags that affect - those transitions only. In this regard, CONTINUATION frames do not result - in state transitions; they are effectively part of the HEADERS or - PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is - processed as a separate event to the frame that bears it; a HEADERS frame - with the END_STREAM flag set can cause two state transitions. - - - Both endpoints have a subjective view of the state of a stream that could be different - when frames are in transit. Endpoints do not coordinate the creation of streams; they are - created unilaterally by either endpoint. The negative consequences of a mismatch in - states are limited to the "closed" state after sending RST_STREAM, where - frames might be received for some time after closing. - - - Streams have the following states: - - - - - - All streams start in the "idle" state. In this state, no frames have been - exchanged. - - - The following transitions are valid from this state: - - - Sending or receiving a HEADERS frame causes the stream to become - "open". The stream identifier is selected as described in . The same HEADERS frame can also - cause a stream to immediately become "half closed". - - - Sending a PUSH_PROMISE frame marks the associated stream for - later use. The stream state for the reserved stream transitions to "reserved - (local)". - - - Receiving a PUSH_PROMISE frame marks the associated stream as - reserved by the remote peer. The state of the stream becomes "reserved - (remote)". - - - - - Receiving any frames other than HEADERS or - PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (local)" state is one that has been promised by sending a - PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an - idle stream by associating the stream with an open stream that was initiated by the - remote peer (see ). - - - In this state, only the following transitions are possible: - - - The endpoint can send a HEADERS frame. This causes the stream to - open in a "half closed (remote)" state. - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MUST NOT send any type of frame other than HEADERS or - RST_STREAM in this state. - - - A PRIORITY frame MAY be received in this state. Receiving any type - of frame other than RST_STREAM or PRIORITY on a stream - in this state MUST be treated as a connection - error of type PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (remote)" state has been reserved by a remote peer. - - - In this state, only the following transitions are possible: - - - Receiving a HEADERS frame causes the stream to transition to - "half closed (local)". - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MAY send a PRIORITY frame in this state to reprioritize - the reserved stream. An endpoint MUST NOT send any type of frame other than - RST_STREAM, WINDOW_UPDATE, or PRIORITY - in this state. - - - Receiving any type of frame other than HEADERS or - RST_STREAM on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "open" state may be used by both peers to send frames of any type. 
- In this state, sending peers observe advertised stream - level flow control limits. - - - From this state either endpoint can send a frame with an END_STREAM flag set, which - causes the stream to transition into one of the "half closed" states: an endpoint - sending an END_STREAM flag causes the stream state to become "half closed (local)"; - an endpoint receiving an END_STREAM flag causes the stream state to become "half - closed (remote)". - - - Either endpoint can send a RST_STREAM frame from this state, causing - it to transition immediately to "closed". - - - - - - - A stream that is in the "half closed (local)" state cannot be used for sending - frames. Only WINDOW_UPDATE, PRIORITY and - RST_STREAM frames can be sent in this state. - - - A stream transitions from this state to "closed" when a frame that contains an - END_STREAM flag is received, or when either peer sends a RST_STREAM - frame. - - - A receiver can ignore WINDOW_UPDATE frames in this state, which might - arrive for a short period after a frame bearing the END_STREAM flag is sent. - - - PRIORITY frames received in this state are used to reprioritize - streams that depend on the current stream. - - - - - - - A stream that is "half closed (remote)" is no longer being used by the peer to send - frames. In this state, an endpoint is no longer obligated to maintain a receiver - flow control window if it performs flow control. - - - If an endpoint receives additional frames for a stream that is in this state, other - than WINDOW_UPDATE, PRIORITY or - RST_STREAM, it MUST respond with a stream error of type - STREAM_CLOSED. - - - A stream that is "half closed (remote)" can be used by the endpoint to send frames - of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. - - - A stream can transition from this state to "closed" by sending a frame that contains - an END_STREAM flag, or when either peer sends a RST_STREAM frame. - - - - - - - The "closed" state is the terminal state. - - - An endpoint MUST NOT send frames other than PRIORITY on a closed - stream. An endpoint that receives any frame other than PRIORITY - after receiving a RST_STREAM MUST treat that as a stream error of type - STREAM_CLOSED. Similarly, an endpoint that receives any frames after - receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type - STREAM_CLOSED, unless the frame is permitted as described below. - - - WINDOW_UPDATE or RST_STREAM frames can be received in - this state for a short period after a DATA or HEADERS - frame containing an END_STREAM flag is sent. Until the remote peer receives and - processes RST_STREAM or the frame bearing the END_STREAM flag, it - might send frames of these types. Endpoints MUST ignore - WINDOW_UPDATE or RST_STREAM frames received in this - state, though endpoints MAY choose to treat frames that arrive a significant time - after sending END_STREAM as a connection - error of type PROTOCOL_ERROR. - - - PRIORITY frames can be sent on closed streams to prioritize streams - that are dependent on the closed stream. Endpoints SHOULD process - PRIORITY frame, though they can be ignored if the stream has been - removed from the dependency tree (see ). - - - If this state is reached as a result of sending a RST_STREAM frame, - the peer that receives the RST_STREAM might have already sent - or - enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint - MUST ignore frames that it receives on closed streams after it has sent a - RST_STREAM frame. An endpoint MAY choose to limit the period over - which it ignores frames and treat frames that arrive after this time as being in - error. - - - Flow controlled frames (i.e., DATA) received after sending - RST_STREAM are counted toward the connection flow control window. - Even though these frames might be ignored, because they are sent before the sender - receives the RST_STREAM, the sender will consider the frames to count - against the flow control window. - - - An endpoint might receive a PUSH_PROMISE frame after it sends - RST_STREAM. PUSH_PROMISE causes a stream to become - "reserved" even if the associated stream has been reset. Therefore, a - RST_STREAM is needed to close an unwanted promised stream. - - - - - - In the absence of more specific guidance elsewhere in this document, implementations - SHOULD treat the receipt of a frame that is not expressly permitted in the description of - a state as a connection error of type - PROTOCOL_ERROR. Frame of unknown types are ignored. - - - An example of the state transitions for an HTTP request/response exchange can be found in - . An example of the state transitions for server push can be - found in and . - - -
- - Streams are identified with an unsigned 31-bit integer. Streams initiated by a client - MUST use odd-numbered stream identifiers; those initiated by the server MUST use - even-numbered stream identifiers. A stream identifier of zero (0x0) is used for - connection control messages; the stream identifier zero cannot be used to establish a - new stream. - - - HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are - responded to with a stream identifier of one (0x1). After the upgrade - completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 - cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. - - - The identifier of a newly established stream MUST be numerically greater than all - streams that the initiating endpoint has opened or reserved. This governs streams that - are opened using a HEADERS frame and streams that are reserved using - PUSH_PROMISE. An endpoint that receives an unexpected stream identifier - MUST respond with a connection error of - type PROTOCOL_ERROR. - - - The first use of a new stream identifier implicitly closes all streams in the "idle" - state that might have been initiated by that peer with a lower-valued stream identifier. - For example, if a client sends a HEADERS frame on stream 7 without ever - sending a frame on stream 5, then stream 5 transitions to the "closed" state when the - first frame for stream 7 is sent or received. - - - Stream identifiers cannot be reused. Long-lived connections can result in an endpoint - exhausting the available range of stream identifiers. A client that is unable to - establish a new stream identifier can establish a new connection for new streams. A - server that is unable to establish a new stream identifier can send a - GOAWAY frame so that the client is forced to open a new connection for - new streams. - -
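A tiny sketch of the identifier rules above (odd for client-initiated streams, even for server-initiated streams, strictly increasing within the 31-bit space); streamIDAllocator is a made-up helper, not part of any library.

package main

import "fmt"

type streamIDAllocator struct {
	next uint32
}

func newAllocator(client bool) *streamIDAllocator {
	if client {
		return &streamIDAllocator{next: 1} // 3, 5, 7, ... follow
	}
	return &streamIDAllocator{next: 2} // server-initiated (pushed) streams are even
}

// nextID returns the next usable identifier, or false once the 31-bit space is
// exhausted and a new connection is needed.
func (a *streamIDAllocator) nextID() (uint32, bool) {
	if a.next > 0x7fffffff {
		return 0, false
	}
	id := a.next
	a.next += 2
	return id, true
}

func main() {
	c := newAllocator(true)
	for i := 0; i < 3; i++ {
		id, _ := c.nextID()
		fmt.Println("client stream", id)
	}
}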
- -
- - A peer can limit the number of concurrently active streams using the - SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent - streams setting is specific to each endpoint and applies only to the peer that receives - the setting. That is, clients specify the maximum number of concurrent streams the - server can initiate, and servers specify the maximum number of concurrent streams the - client can initiate. - - - Streams that are in the "open" state, or either of the "half closed" states count toward - the maximum number of streams that an endpoint is permitted to open. Streams in any of - these three states count toward the limit advertised in the - SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the - "reserved" states do not count toward the stream limit. - - - Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a - HEADERS frame that causes their advertised concurrent stream limit to be - exceeded MUST treat this as a stream error. An - endpoint that wishes to reduce the value of - SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current - number of open streams can either close streams that exceed the new value or allow - streams to complete. - -
-
- -
- - Using streams for multiplexing introduces contention over use of the TCP connection, - resulting in blocked streams. A flow control scheme ensures that streams on the same - connection do not destructively interfere with each other. Flow control is used for both - individual streams and for the connection as a whole. - - - HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. - - -
- - HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be - used without requiring protocol changes. Flow control in HTTP/2 has the following - characteristics: - - - Flow control is specific to a connection; i.e., it is "hop-by-hop", not - "end-to-end". - - - Flow control is based on window update frames. Receivers advertise how many octets - they are prepared to receive on a stream and for the entire connection. This is a - credit-based scheme. - - - Flow control is directional with overall control provided by the receiver. A - receiver MAY choose to set any window size that it desires for each stream and for - the entire connection. A sender MUST respect flow control limits imposed by a - receiver. Clients, servers and intermediaries all independently advertise their - flow control window as a receiver and abide by the flow control limits set by - their peer when sending. - - - The initial value for the flow control window is 65,535 octets for both new streams - and the overall connection. - - - The frame type determines whether flow control applies to a frame. Of the frames - specified in this document, only DATA frames are subject to flow - control; all other frame types do not consume space in the advertised flow control - window. This ensures that important control frames are not blocked by flow control. - - - Flow control cannot be disabled. - - - HTTP/2 defines only the format and semantics of the WINDOW_UPDATE - frame (). This document does not stipulate how a - receiver decides when to send this frame or the value that it sends, nor does it - specify how a sender chooses to send packets. Implementations are able to select - any algorithm that suits their needs. - - - - - Implementations are also responsible for managing how requests and responses are sent - based on priority; choosing how to avoid head of line blocking for requests; and - managing the creation of new streams. Algorithm choices for these could interact with - any flow control algorithm. - -
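A minimal sketch of the credit-based accounting described above, from the sender's point of view: a 65,535-octet initial window, DATA consuming credit, and WINDOW_UPDATE replenishing it. The sendWindow type is made up, and a real sender tracks one such window per stream plus one for the whole connection.

package main

import (
	"errors"
	"fmt"
)

type sendWindow struct{ avail int64 }

func newSendWindow() *sendWindow { return &sendWindow{avail: 65535} }

// consume reserves n octets of credit for a DATA frame, if available.
func (w *sendWindow) consume(n int64) error {
	if n > w.avail {
		return errors.New("flow control: would exceed peer's advertised window")
	}
	w.avail -= n
	return nil
}

// replenish applies a WINDOW_UPDATE increment received from the peer.
func (w *sendWindow) replenish(increment int64) {
	w.avail += increment
}

func main() {
	w := newSendWindow()
	fmt.Println(w.consume(16384), "remaining:", w.avail)
	w.replenish(16384)
	fmt.Println("after WINDOW_UPDATE:", w.avail)
}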
- -
- - Flow control is defined to protect endpoints that are operating under resource constraints. For example, a proxy needs to share memory between many connections, and also might have a slow upstream connection and a fast downstream one. Flow control addresses cases where the receiver is unable to process data on one stream, yet wants to continue to process other streams in the same connection. - - Deployments that do not require this capability can advertise a flow control window of the maximum size, incrementing the available space when new data is received. This effectively disables flow control for that receiver. Conversely, a sender is always subject to the flow control window advertised by the receiver. - - Deployments with constrained resources (for example, memory) can employ flow control to limit the amount of memory a peer can consume. Note, however, that this can lead to suboptimal use of available network resources if flow control is enabled without knowledge of the bandwidth-delay product (see ). - - Even with full awareness of the current bandwidth-delay product, implementation of flow control can be difficult. When using flow control, the receiver MUST read from the TCP receive buffer in a timely fashion. Failure to do so could lead to a deadlock when critical frames, such as WINDOW_UPDATE, are not read and acted upon. -
-
- -
- - A client can assign a priority for a new stream by including prioritization information in - the HEADERS frame that opens the stream. For an existing - stream, the PRIORITY frame can be used to change the - priority. - - - The purpose of prioritization is to allow an endpoint to express how it would prefer its - peer allocate resources when managing concurrent streams. Most importantly, priority can - be used to select streams for transmitting frames when there is limited capacity for - sending. - - - Streams can be prioritized by marking them as dependent on the completion of other streams - (). Each dependency is assigned a relative weight, a number - that is used to determine the relative proportion of available resources that are assigned - to streams dependent on the same stream. - - - - Explicitly setting the priority for a stream is input to a prioritization process. It - does not guarantee any particular processing or transmission order for the stream relative - to any other stream. An endpoint cannot force a peer to process concurrent streams in a - particular order using priority. Expressing priority is therefore only ever a suggestion. - - - Providing prioritization information is optional, so default values are used if no - explicit indicator is provided (). - - -
- - Each stream can be given an explicit dependency on another stream. Including a - dependency expresses a preference to allocate resources to the identified stream rather - than to the dependent stream. - - - A stream that is not dependent on any other stream is given a stream dependency of 0x0. - In other words, the non-existent stream 0 forms the root of the tree. - - - A stream that depends on another stream is a dependent stream. The stream upon which a - stream is dependent is a parent stream. A dependency on a stream that is not currently - in the tree - such as a stream in the "idle" state - results in that stream being given - a default priority. - - - When assigning a dependency on another stream, the stream is added as a new dependency - of the parent stream. Dependent streams that share the same parent are not ordered with - respect to each other. For example, if streams B and C are dependent on stream A, and - if stream D is created with a dependency on stream A, this results in a dependency order - of A followed by B, C, and D in any order. - -
- [Figure: stream A with dependents B and C; after stream D is added with a dependency on A, streams B, D and C are all dependents of A, in no particular order] -
- - An exclusive flag allows for the insertion of a new level of dependencies. The - exclusive flag causes the stream to become the sole dependency of its parent stream, - causing other dependencies to become dependent on the exclusive stream. In the - previous example, if stream D is created with an exclusive dependency on stream A, this - results in D becoming the dependency parent of B and C. - -
- [Figure: with an exclusive dependency, stream D becomes the sole dependent of A, and B and C become dependents of D] -
- - Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all - of the streams that it depends on (the chain of parent streams up to 0x0) are either - closed, or it is not possible to make progress on them. - - - A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. - -
- -
- - All dependent streams are allocated an integer weight between 1 and 256 (inclusive). - - - Streams with the same parent SHOULD be allocated resources proportionally based on their - weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A - with weight 12, and if no progress can be made on A, stream B ideally receives one third - of the resources allocated to stream C. - -
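A small sketch of the proportional-share rule above for streams that share a parent; with weights 4 and 12, the lighter stream receives a quarter of the parent's capacity, which is one third of what the heavier stream receives. The shares helper and the stream identifiers are illustrative.

package main

import "fmt"

// shares maps stream identifiers to their fraction of the parent's resources,
// proportional to their weights.
func shares(weights map[uint32]int) map[uint32]float64 {
	total := 0
	for _, w := range weights {
		total += w
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = float64(w) / float64(total)
	}
	return out
}

func main() {
	// Streams 3 (weight 4) and 5 (weight 12) depend on the same parent.
	for id, s := range shares(map[uint32]int{3: 4, 5: 12}) {
		fmt.Printf("stream %d gets %.0f%% of the parent's capacity\n", id, s*100)
	}
}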
- -
- - Stream priorities are changed using the PRIORITY frame. Setting a - dependency causes a stream to become dependent on the identified parent stream. - - - Dependent streams move with their parent stream if the parent is reprioritized. Setting - a dependency with the exclusive flag for a reprioritized stream moves all the - dependencies of the new parent stream to become dependent on the reprioritized stream. - - - If a stream is made dependent on one of its own dependencies, the formerly dependent - stream is first moved to be dependent on the reprioritized stream's previous parent. - The moved dependency retains its weight. - -
- - For example, consider an original dependency tree where B and C depend on A, D and E depend on C, and F depends on D. If A is made dependent on D, then D takes the place of A. All other dependency relationships stay the same, except for F, which becomes dependent on A if the reprioritization is exclusive. - - [Figure: the intermediate tree with A moved under D, the resulting tree for a non-exclusive reprioritization, and the resulting tree for an exclusive reprioritization, in which F also becomes dependent on A] -
-
- -
- - When a stream is removed from the dependency tree, its dependencies can be moved to - become dependent on the parent of the closed stream. The weights of new dependencies - are recalculated by distributing the weight of the dependency of the closed stream - proportionally based on the weights of its dependencies. - - - Streams that are removed from the dependency tree cause some prioritization information - to be lost. Resources are shared between streams with the same parent stream, which - means that if a stream in that set closes or becomes blocked, any spare capacity - allocated to a stream is distributed to the immediate neighbors of the stream. However, - if the common dependency is removed from the tree, those streams share resources with - streams at the next highest level. - - - For example, assume streams A and B share a parent, and streams C and D both depend on - stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, - then stream C receives all the resources dedicated to stream A. If stream A is removed - from the tree, the weight of stream A is divided between streams C and D. If stream D - is still unable to proceed, this results in stream C receiving a reduced proportion of - resources. For equal starting weights, C receives one third, rather than one half, of - available resources. - - - It is possible for a stream to become closed while prioritization information that - creates a dependency on that stream is in transit. If a stream identified in a - dependency has no associated priority information, then the dependent stream is instead - assigned a default priority. This potentially creates - suboptimal prioritization, since the stream could be given a priority that is different - to what is intended. - - - To avoid these problems, an endpoint SHOULD retain stream prioritization state for a - period after streams become closed. The longer state is retained, the lower the chance - that streams are assigned incorrect or default priority values. - - - This could create a large state burden for an endpoint, so this state MAY be limited. - An endpoint MAY apply a fixed upper limit on the number of closed streams for which - prioritization state is tracked to limit state exposure. The amount of additional state - an endpoint maintains could be dependent on load; under high load, prioritization state - can be discarded to limit resource commitments. In extreme cases, an endpoint could - even discard prioritization state for active or reserved streams. If a fixed limit is - applied, endpoints SHOULD maintain state for at least as many streams as allowed by - their setting for SETTINGS_MAX_CONCURRENT_STREAMS. - - - An endpoint receiving a PRIORITY frame that changes the priority of a - closed stream SHOULD alter the dependencies of the streams that depend on it, if it has - retained enough state to do so. - -
- -
- - Providing priority information is optional. Streams are assigned a non-exclusive - dependency on stream 0x0 by default. Pushed streams - initially depend on their associated stream. In both cases, streams are assigned a - default weight of 16. - -
-
- -
- - HTTP/2 framing permits two classes of error: - - - An error condition that renders the entire connection unusable is a connection error. - - - An error in an individual stream is a stream error. - - - - - A list of error codes is included in . - - -
- - A connection error is any error which prevents further processing of the framing layer, - or which corrupts any connection state. - - - An endpoint that encounters a connection error SHOULD first send a GOAWAY - frame () with the stream identifier of the last stream that it - successfully received from its peer. The GOAWAY frame includes an error - code that indicates why the connection is terminating. After sending the - GOAWAY frame, the endpoint MUST close the TCP connection. - - - It is possible that the GOAWAY will not be reliably received by the - receiving endpoint (see ). In the event of a connection error, - GOAWAY only provides a best effort attempt to communicate with the peer - about why the connection is being terminated. - - - An endpoint can end a connection at any time. In particular, an endpoint MAY choose to - treat a stream error as a connection error. Endpoints SHOULD send a - GOAWAY frame when ending a connection, providing that circumstances - permit it. - -
- -
- - A stream error is an error related to a specific stream that does not affect processing - of other streams. - - - An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error - occurred. The RST_STREAM frame includes an error code that indicates the - type of error. - - - A RST_STREAM is the last frame that an endpoint can send on a stream. - The peer that sends the RST_STREAM frame MUST be prepared to receive any - frames that were sent or enqueued for sending by the remote peer. These frames can be - ignored, except where they modify connection state (such as the state maintained for - header compression, or flow control). - - - Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for - any stream. However, an endpoint MAY send additional RST_STREAM frames if - it receives frames on a closed stream after more than a round-trip time. This behavior - is permitted to deal with misbehaving implementations. - - - An endpoint MUST NOT send a RST_STREAM in response to an - RST_STREAM frame, to avoid looping. - -
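A brief sketch of how the two error classes above map onto the vendored golang.org/x/net/http2 Framer: a stream error is answered with RST_STREAM on the affected stream, while a connection error sends GOAWAY with the last successfully processed stream identifier and then closes the TCP connection. The onStreamError and onConnError helper names are made up.

package main

import (
	"net"

	"golang.org/x/net/http2"
)

// onStreamError resets only the stream on which the error occurred.
func onStreamError(fr *http2.Framer, streamID uint32, code http2.ErrCode) error {
	return fr.WriteRSTStream(streamID, code)
}

// onConnError reports the last stream processed, then tears the connection down.
func onConnError(fr *http2.Framer, conn net.Conn, lastStreamID uint32, code http2.ErrCode) error {
	if err := fr.WriteGoAway(lastStreamID, code, nil); err != nil {
		conn.Close()
		return err
	}
	return conn.Close()
}

func main() {
	// Wiring these helpers up requires a real connection and serve loop,
	// which is out of scope for this sketch.
}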
- -
- - If the TCP connection is closed or reset while streams remain in open or half closed - states, then the endpoint MUST assume that those streams were abnormally interrupted and - could be incomplete. - -
-
- -
- - HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide - additional services or alter any aspect of the protocol, within the limitations described - in this section. Extensions are effective only within the scope of a single HTTP/2 - connection. - - - Extensions are permitted to use new frame types, new - settings, or new error - codes. Registries are established for managing these extension points: frame types, settings and - error codes. - - - Implementations MUST ignore unknown or unsupported values in all extensible protocol - elements. Implementations MUST discard frames that have unknown or unsupported types. - This means that any of these extension points can be safely used by extensions without - prior arrangement or negotiation. However, extension frames that appear in the middle of - a header block are not permitted; these MUST be treated - as a connection error of type - PROTOCOL_ERROR. - - - However, extensions that could change the semantics of existing protocol components MUST - be negotiated before being used. For example, an extension that changes the layout of the - HEADERS frame cannot be used until the peer has given a positive signal - that this is acceptable. In this case, it could also be necessary to coordinate when the - revised layout comes into effect. Note that treating any frame other than - DATA frames as flow controlled is such a change in semantics, and can only - be done through negotiation. - - - This document doesn't mandate a specific method for negotiating the use of an extension, - but notes that a setting could be used for that - purpose. If both peers set a value that indicates willingness to use the extension, then - the extension can be used. If a setting is used for extension negotiation, the initial - value MUST be defined so that the extension is initially disabled. - -
-
- -
This specification defines a number of frame types, each identified by a unique 8-bit type code. Each frame type serves a distinct purpose either in the establishment and management of the connection as a whole, or of individual streams.

The transmission of specific frame types can alter the state of a connection. If endpoints fail to maintain a synchronized view of the connection state, successful communication within the connection will no longer be possible. Therefore, it is important that endpoints have a shared comprehension of how the state is affected by the use of any given frame.
- - DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated - with a stream. One or more DATA frames are used, for instance, to carry HTTP request or - response payloads. - - - DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to - obscure the size of messages. - -
- -
- - The DATA frame contains the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is optional and is only present if the PADDED flag is set. - - - Application data. The amount of data is the remainder of the frame payload after - subtracting the length of the other fields that are present. - - - Padding octets that contain no application semantic value. Padding octets MUST be set - to zero when sending and ignored when receiving. - - - - - - The DATA frame defines the following flags: - - - Bit 1 being set indicates that this frame is the last that the endpoint will send for - the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. - - - Bit 4 being set indicates that the Pad Length field and any padding that it describes - is present. - - - - - DATA frames MUST be associated with a stream. If a DATA frame is received whose stream - identifier field is 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - DATA frames are subject to flow control and can only be sent when a stream is in the - "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow - control, including Pad Length and Padding fields if present. If a DATA frame is received - whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond - with a stream error of type - STREAM_CLOSED. - - - The total number of padding octets is determined by the value of the Pad Length field. If - the length of the padding is greater than the length of the frame payload, the recipient - MUST treat this as a connection error of - type PROTOCOL_ERROR. - - - A frame can be increased in size by one octet by including a Pad Length field with a - value of zero. - - - - - Padding is a security feature; see . - -
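The following non-normative Go sketch shows the Pad Length handling described above for a DATA frame payload; the helper name and error strings are illustrative only, and the PADDED flag value (0x8) corresponds to bit 4 above. Note that flow control still covers the whole payload, padding included.

package h2sketch

import "errors"

// splitDataPayload separates a DATA frame payload into application data and
// padding, following the layout above: an optional Pad Length octet (present
// only when the PADDED flag is set), the data, then Pad Length octets of
// padding.
func splitDataPayload(flags byte, payload []byte) (data []byte, err error) {
	const flagPadded = 0x8
	if flags&flagPadded == 0 {
		return payload, nil
	}
	if len(payload) < 1 {
		return nil, errors.New("FRAME_SIZE_ERROR: missing Pad Length")
	}
	padLen := int(payload[0])
	rest := payload[1:]
	if padLen > len(rest) {
		// Padding longer than the remaining payload: connection error.
		return nil, errors.New("PROTOCOL_ERROR: padding exceeds payload")
	}
	return rest[:len(rest)-padLen], nil
}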
- -
- - The HEADERS frame (type=0x1) is used to open a stream, - and additionally carries a header block fragment. HEADERS frames can be sent on a stream - in the "open" or "half closed (remote)" states. - -
- -
- - The HEADERS frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set. - - - A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set. - - - An 8-bit weight for the stream, see . Add one to the - value to obtain a weight between 1 and 256. This field is only present if the - PRIORITY flag is set. - - - A header block fragment. - - - Padding octets that contain no application semantic value. Padding octets MUST be set - to zero when sending and ignored when receiving. - - - - - - The HEADERS frame defines the following flags: - - - - Bit 1 being set indicates that the header block is - the last that the endpoint will send for the identified stream. Setting this flag - causes the stream to enter one of "half closed" - states. - - - A HEADERS frame carries the END_STREAM flag that signals the end of a stream. - However, a HEADERS frame with the END_STREAM flag set can be followed by - CONTINUATION frames on the same stream. Logically, the - CONTINUATION frames are part of the HEADERS frame. - - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A HEADERS frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the - receipt of any other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight - fields are present; see . - - - - - - - The payload of a HEADERS frame contains a header block - fragment. A header block that does not fit within a HEADERS frame is continued in - a CONTINUATION frame. - - - - HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose - stream identifier field is 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - The HEADERS frame changes the connection state as described in . - - - - The HEADERS frame includes optional padding. Padding fields and flags are identical to - those defined for DATA frames. - - - Prioritization information in a HEADERS frame is logically equivalent to a separate - PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in - stream prioritization when new streams are created. Priorization fields in HEADERS frames - subsequent to the first on a stream reprioritize the - stream. - -
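As a non-normative example of the relationship between the header block and the END_HEADERS/END_STREAM flags, the sketch below uses the vendored golang.org/x/net/http2 and http2/hpack packages to send a request header block that fits in a single HEADERS frame; the header field values are hypothetical.

package h2sketch

import (
	"bytes"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// writeRequestHeaders encodes a header block with HPACK and sends it in a
// single HEADERS frame with END_HEADERS set (so no CONTINUATION frames
// follow) and END_STREAM set, since this hypothetical request has no body.
func writeRequestHeaders(fr *http2.Framer, streamID uint32) error {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":path", Value: "/resource"},
		{Name: ":authority", Value: "example.org"},
	} {
		if err := enc.WriteField(hf); err != nil {
			return err
		}
	}
	return fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      streamID,
		BlockFragment: buf.Bytes(),
		EndHeaders:    true, // entire header block fits in this frame
		EndStream:     true, // no DATA frames will follow
	})
}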
- -
- - The PRIORITY frame (type=0x2) specifies the sender-advised - priority of a stream. It can be sent at any time for an existing stream, including - closed streams. This enables reprioritization of existing streams. - -
- -
- - The payload of a PRIORITY frame contains the following fields: - - - A single bit flag indicates that the stream dependency is exclusive, see . - - - A 31-bit stream identifier for the stream that this stream depends on, see . - - - An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. - - - - - - The PRIORITY frame does not define any flags. - - - - The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received - with a stream identifier of 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", - "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be - sent between consecutive frames that comprise a single header - block. Note that this frame could arrive after processing or frame sending has - completed, which would cause it to have no effect on the current stream. For a stream - that is in the "half closed (remote)" or "closed" - state, this frame can only affect - processing of the current stream and not frame transmission. - - - The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. - This allows for the reprioritization of a group of dependent streams by altering the - priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a - closed stream risks being ignored due to the peer having discarded priority state - information for that stream. - -
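A non-normative sketch of the 5-octet payload layout described above; the helper is purely illustrative and not part of any library.

package h2sketch

import "encoding/binary"

// encodePriorityPayload packs the PRIORITY frame payload: the E bit in the
// high bit of a 31-bit stream dependency, followed by one weight octet.
// The weight octet is the wire value; add one to obtain the effective
// weight between 1 and 256.
func encodePriorityPayload(exclusive bool, dependency uint32, weightMinusOne uint8) [5]byte {
	var p [5]byte
	dep := dependency & 0x7fffffff // stream dependency is 31 bits
	if exclusive {
		dep |= 1 << 31 // set the E (exclusive) flag
	}
	binary.BigEndian.PutUint32(p[0:4], dep)
	p[4] = weightMinusOne
	return p
}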
- -
- - The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by - the initiator of a stream, it indicates that they wish to cancel the stream or that an - error condition has occurred. When sent by the receiver of a stream, it indicates that - either the receiver is rejecting the stream, requesting that the stream be cancelled, or - that an error condition has occurred. - -
- -
- - - The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being - terminated. - - - - The RST_STREAM frame does not define any flags. - - - - The RST_STREAM frame fully terminates the referenced stream and causes it to enter the - closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send - additional frames for that stream, with the exception of PRIORITY. However, - after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process - additional frames sent on the stream that might have been sent by the peer prior to the - arrival of the RST_STREAM. - - - - RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received - with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - - - RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM - frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - -
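As a non-normative illustration, the vendored golang.org/x/net/http2 Framer can emit the two common uses of RST_STREAM described above; the helper names are ours.

package h2sketch

import "golang.org/x/net/http2"

// refuseStream abnormally terminates a stream before doing any application
// work, using the 4-octet error code payload described above. After this,
// no frames other than PRIORITY should be sent on the stream.
func refuseStream(fr *http2.Framer, streamID uint32) error {
	return fr.WriteRSTStream(streamID, http2.ErrCodeRefusedStream)
}

// cancelStream tells the peer the stream is no longer needed.
func cancelStream(fr *http2.Framer, streamID uint32) error {
	return fr.WriteRSTStream(streamID, http2.ErrCodeCancel)
}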
- -
- - The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints - communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is - also used to acknowledge the receipt of those parameters. Individually, a SETTINGS - parameter can also be referred to as a "setting". - - - SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, - which are used by the receiving peer. Different values for the same parameter can be - advertised by each peer. For example, a client might set a high initial flow control - window, whereas a server might set a lower value to conserve resources. - - - - A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be - sent at any other time by either endpoint over the lifetime of the connection. - Implementations MUST support all of the parameters defined by this specification. - - - - Each parameter in a SETTINGS frame replaces any existing value for that parameter. - Parameters are processed in the order in which they appear, and a receiver of a SETTINGS - frame does not need to maintain any state other than the current value of its - parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by - a receiver. - - - SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS - frame defines the following flag: - - - Bit 1 being set indicates that this frame acknowledges receipt and application of the - peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST - be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value - other than 0 MUST be treated as a connection - error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. - - - - - SETTINGS frames always apply to a connection, never a single stream. The stream - identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS - frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond - with a connection error of type - PROTOCOL_ERROR. - - - The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame - MUST be treated as a connection error of type - PROTOCOL_ERROR. - - -
- - The payload of a SETTINGS frame consists of zero or more parameters, each consisting of - an unsigned 16-bit setting identifier and an unsigned 32-bit value. - - -
- -
-
- -
The following parameters are defined:

SETTINGS_HEADER_TABLE_SIZE (0x1):
Allows the sender to inform the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. The encoder can select any size equal to or less than this value by using signaling specific to the header compression format inside a header block. The initial value is 4,096 octets.

SETTINGS_ENABLE_PUSH (0x2):
This setting can be used to disable server push. An endpoint MUST NOT send a PUSH_PROMISE frame if it receives this parameter set to a value of 0. An endpoint that has both set this parameter to 0 and had it acknowledged MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.
The initial value is 1, which indicates that server push is permitted. Any value other than 0 or 1 MUST be treated as a connection error of type PROTOCOL_ERROR.

SETTINGS_MAX_CONCURRENT_STREAMS (0x3):
Indicates the maximum number of concurrent streams that the sender will allow. This limit is directional: it applies to the number of streams that the sender permits the receiver to create. Initially there is no limit to this value. It is recommended that this value be no smaller than 100, so as to not unnecessarily limit parallelism.
A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special by endpoints. A zero value does prevent the creation of new streams, however this can also happen for any limit that is exhausted with active streams. Servers SHOULD only set a zero value for short durations; if a server does not wish to accept requests, closing the connection could be preferable.

SETTINGS_INITIAL_WINDOW_SIZE (0x4):
Indicates the sender's initial window size (in octets) for stream level flow control. The initial value is 2^16-1 (65,535) octets.
This setting affects the window size of all streams, including existing streams, see .
Values above the maximum flow control window size of 2^31-1 MUST be treated as a connection error of type FLOW_CONTROL_ERROR.

SETTINGS_MAX_FRAME_SIZE (0x5):
Indicates the size of the largest frame payload that the sender is willing to receive, in octets.
The initial value is 2^14 (16,384) octets. The value advertised by an endpoint MUST be between this initial value and the maximum allowed frame size (2^24-1 or 16,777,215 octets), inclusive. Values outside this range MUST be treated as a connection error of type PROTOCOL_ERROR.

SETTINGS_MAX_HEADER_LIST_SIZE (0x6):
This advisory setting informs a peer of the maximum size of header list that the sender is prepared to accept, in octets. The value is based on the uncompressed size of header fields, including the length of the name and value in octets plus an overhead of 32 octets for each header field.
For any given request, a lower limit than what is advertised MAY be enforced. The initial value of this setting is unlimited.

An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier MUST ignore that setting.
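As a non-normative illustration, the sketch below advertises a few of these parameters using the vendored golang.org/x/net/http2 package; the chosen values are arbitrary examples, not recommendations.

package h2sketch

import "golang.org/x/net/http2"

// advertiseSettings sends one SETTINGS frame carrying a few of the parameters
// defined above: a larger initial stream window, a concurrency cap, and
// server push disabled.
func advertiseSettings(fr *http2.Framer) error {
	return fr.WriteSettings(
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100},
		http2.Setting{ID: http2.SettingEnablePush, Val: 0},
	)
}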
- -
- - Most values in SETTINGS benefit from or require an understanding of when the peer has - received and applied the changed parameter values. In order to provide - such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag - is not set MUST apply the updated parameters as soon as possible upon receipt. - - - The values in the SETTINGS frame MUST be processed in the order they appear, with no - other frame processing between values. Unsupported parameters MUST be ignored. Once - all values have been processed, the recipient MUST immediately emit a SETTINGS frame - with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender - of the altered parameters can rely on the setting having been applied. - - - If the sender of a SETTINGS frame does not receive an acknowledgement within a - reasonable amount of time, it MAY issue a connection error of type - SETTINGS_TIMEOUT. - -
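A non-normative sketch of this acknowledgement sequence, assuming the vendored golang.org/x/net/http2 Framer and a hypothetical apply callback that installs each parameter:

package h2sketch

import "golang.org/x/net/http2"

// onSettings applies a received SETTINGS frame and acknowledges it. Values
// are processed in the order they appear via ForeachSetting; unknown
// identifiers are simply ignored by the callback.
func onSettings(fr *http2.Framer, sf *http2.SettingsFrame, apply func(http2.Setting)) error {
	if sf.IsAck() {
		// Our earlier SETTINGS frame has now been applied by the peer.
		return nil
	}
	if err := sf.ForeachSetting(func(s http2.Setting) error {
		apply(s) // apply each value as soon as possible, in order
		return nil
	}); err != nil {
		return err
	}
	// Emit the acknowledgement immediately after applying all values.
	return fr.WriteSettingsAck()
}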
-
- -
- - The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of - streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned - 31-bit identifier of the stream the endpoint plans to create along with a set of headers - that provide additional context for the stream. contains a - thorough description of the use of PUSH_PROMISE frames. - - -
- -
- - The PUSH_PROMISE frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single reserved bit. - - - An unsigned 31-bit integer that identifies the stream that is reserved by the - PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next - stream sent by the sender (see new stream - identifier). - - - A header block fragment containing request header - fields. - - - Padding octets. - - - - - - The PUSH_PROMISE frame defines the following flags: - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any - other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - - - PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream - identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the - stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - Promised streams are not required to be used in the order they are promised. The - PUSH_PROMISE only reserves stream identifiers for later use. - - - - PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the - peer endpoint is set to 0. An endpoint that has set this setting and has received - acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type - PROTOCOL_ERROR. - - - Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a - RST_STREAM referencing the promised stream identifier back to the sender of - the PUSH_PROMISE. - - - - A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for - header compression. PUSH_PROMISE also reserves a stream for later use, causing the - promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a - stream unless that stream is either "open" or "half closed (remote)"; the sender MUST - ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST - be in the "idle" state). - - - Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream - state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a - stream that is neither "open" nor "half closed (local)" as a connection error of type - PROTOCOL_ERROR. However, an endpoint that has sent - RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that - might have been created before the RST_STREAM frame is received and - processed. - - - A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a - stream that is not currently in the "idle" state) as a connection error of type - PROTOCOL_ERROR. - - - - The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical - to those defined for DATA frames. - -
- -
- - The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the - sender, as well as determining whether an idle connection is still functional. PING - frames can be sent from any endpoint. - -
- -
- - - In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. - A sender can include any value it chooses and use those bytes in any fashion. - - - Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with - the ACK flag set in response, with an identical payload. PING responses SHOULD be given - higher priority than any other frame. - - - - The PING frame defines the following flags: - - - Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST - set this flag in PING responses. An endpoint MUST NOT respond to PING frames - containing this flag. - - - - - PING frames are not associated with any individual stream. If a PING frame is received - with a stream identifier field value other than 0x0, the recipient MUST respond with a - connection error of type - PROTOCOL_ERROR. - - - Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type - FRAME_SIZE_ERROR. - - -
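A non-normative sketch of the required PING response behavior, using the vendored golang.org/x/net/http2 types:

package h2sketch

import "golang.org/x/net/http2"

// onPing answers a peer's PING. A PING that is itself an acknowledgement is
// never answered; otherwise the response echoes the same 8 opaque octets with
// the ACK flag set, which lets the original sender measure a round trip.
func onPing(fr *http2.Framer, pf *http2.PingFrame) error {
	if pf.IsAck() {
		return nil // this is a response to one of our own PINGs
	}
	return fr.WritePing(true, pf.Data)
}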
- -
The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this connection. GOAWAY can be sent by either the client or the server. Once sent, the sender will ignore frames sent on any new streams with identifiers higher than the included last stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the connection, although a new connection can be established for new streams.

The purpose of this frame is to allow an endpoint to gracefully stop accepting new streams, while still finishing processing of previously established streams. This enables administrative actions, like server maintenance.

There is an inherent race condition between an endpoint starting new streams and the remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream identifier of the last peer-initiated stream which was or might be processed on the sending endpoint in this connection. For instance, if the server sends a GOAWAY frame, the identified stream is the highest numbered stream initiated by the client.

If the receiver of the GOAWAY has sent data on streams with a higher stream identifier than what is indicated in the GOAWAY frame, those streams are not or will not be processed. The receiver of the GOAWAY frame can treat the streams as though they had never been created at all, thereby allowing those streams to be retried later on a new connection.

Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote peer can know whether a stream has been partially processed or not. For example, if an HTTP client sends a POST at the same time that a server closes a connection, the client cannot know if the server started to process that POST request if the server does not send a GOAWAY frame to indicate what streams it might have acted on.

An endpoint might choose to close a connection without sending GOAWAY for misbehaving peers.
- -
The GOAWAY frame does not define any flags.

The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type PROTOCOL_ERROR.

The last stream identifier in the GOAWAY frame contains the highest numbered stream identifier on which the sender of the GOAWAY frame might have taken, or might yet take, some action. All streams up to and including the identified stream might have been processed in some way. The last stream identifier can be set to 0 if no streams were processed.

In this context, "processed" means that some data from the stream was passed to some higher layer of software that might have taken some action as a result.

If a connection terminates without a GOAWAY frame, the last stream identifier is effectively the highest possible stream identifier.

On streams with lower or equal numbered identifiers that were not closed completely prior to the connection being closed, re-attempting requests, transactions, or any protocol activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or DELETE. Any protocol activity that uses higher numbered streams can be safely retried using a new connection.

Activity on streams numbered lower or equal to the last stream identifier might still complete successfully. The sender of a GOAWAY frame might gracefully shut down a connection by sending a GOAWAY frame, maintaining the connection in an open state until all in-progress streams complete.

An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could subsequently encounter a condition that requires immediate termination of the connection. The last stream identifier from the last GOAWAY frame received indicates which streams could have been acted upon. Endpoints MUST NOT increase the value they send in the last stream identifier, since the peers might already have retried unprocessed requests on another connection.

A client that is unable to retry requests loses all requests that are in flight when the server closes the connection. This is especially true for intermediaries that might not be serving clients using HTTP/2. A server that is attempting to gracefully shut down a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to 2^31-1 and a NO_ERROR code. This signals to the client that a shutdown is imminent and that no further requests can be initiated. After waiting at least one round trip time, the server can send another GOAWAY frame with an updated last stream identifier. This ensures that a connection can be cleanly shut down without losing requests.

After sending a GOAWAY frame, the sender can discard frames for streams with identifiers higher than the identified last stream. However, any frames that alter connection state cannot be completely ignored. For instance, HEADERS, PUSH_PROMISE and CONTINUATION frames MUST be minimally processed to ensure the state maintained for header compression is consistent (see ); similarly, DATA frames MUST be counted toward the connection flow control window. Failure to process these frames can cause flow control or header compression state to become unsynchronized.
- - - - The GOAWAY frame also contains a 32-bit error code that - contains the reason for closing the connection. - - - Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug - data is intended for diagnostic purposes only and carries no semantic value. Debug - information could contain security- or privacy-sensitive data. Logged or otherwise - persistently stored debug data MUST have adequate safeguards to prevent unauthorized - access. - -
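A non-normative sketch of the two-step graceful shutdown described above, using the vendored golang.org/x/net/http2 Framer; the round-trip estimate and helper name are assumptions.

package h2sketch

import (
	"time"

	"golang.org/x/net/http2"
)

// gracefulGoAway first warns the client with a GOAWAY carrying the maximum
// stream identifier (2^31-1) and NO_ERROR, then, after at least one round
// trip, sends a second GOAWAY with the real last processed stream.
func gracefulGoAway(fr *http2.Framer, lastProcessed uint32, rtt time.Duration) error {
	if err := fr.WriteGoAway(1<<31-1, http2.ErrCodeNo, nil); err != nil {
		return err
	}
	time.Sleep(rtt) // give in-flight requests a chance to arrive
	return fr.WriteGoAway(lastProcessed, http2.ErrCodeNo, nil)
}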
- -
- - The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. - - - Flow control operates at two levels: on each individual stream and on the entire - connection. - - - Both types of flow control are hop-by-hop; that is, only between the two endpoints. - Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. - However, throttling of data transfer by any receiver can indirectly cause the propagation - of flow control information toward the original sender. - - - Flow control only applies to frames that are identified as being subject to flow control. - Of the frame types defined in this document, this includes only DATA frames. - Frames that are exempt from flow control MUST be accepted and processed, unless the - receiver is unable to assign resources to handling the frame. A receiver MAY respond with - a stream error or connection error of type - FLOW_CONTROL_ERROR if it is unable to accept a frame. - -
- -
The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer indicating the number of octets that the sender can transmit in addition to the existing flow control window. The legal range for the increment to the flow control window is 1 to 2^31-1 (0x7fffffff) octets.

The WINDOW_UPDATE frame does not define any flags.

The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the former case, the frame's stream identifier indicates the affected stream; in the latter, the value "0" indicates that the entire connection is the subject of the frame.

A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window increment of 0 as a stream error of type PROTOCOL_ERROR; errors on the connection flow control window MUST be treated as a connection error.

WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag. This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)" or "closed" stream. A receiver MUST NOT treat this as an error, see .

A receiver that receives a flow controlled frame MUST always account for its contribution against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the frame is in error. Since the sender counts the frame toward the flow control window, if the receiver does not, the flow control window at the sender and receiver can become different.
Flow control in HTTP/2 is implemented using a window kept by each sender on every stream. The flow control window is a simple integer value that indicates how many octets of data the sender is permitted to transmit; as such, its size is a measure of the buffering capacity of the receiver.

Two flow control windows are applicable: the stream flow control window and the connection flow control window. The sender MUST NOT send a flow controlled frame with a length that exceeds the space available in either of the flow control windows advertised by the receiver. Frames with zero length with the END_STREAM flag set (that is, an empty DATA frame) MAY be sent if there is no available space in either flow control window.

For flow control calculations, the 9 octet frame header is not counted.

After sending a flow controlled frame, the sender reduces the space available in both windows by the length of the transmitted frame.

The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream and connection level flow control windows.

A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the amount specified in the frame.

A sender MUST NOT allow a flow control window to exceed 2^31-1 octets. If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this maximum, it MUST terminate either the stream or the connection, as appropriate: for streams, the sender sends a RST_STREAM with an error code of FLOW_CONTROL_ERROR; for the connection, a GOAWAY frame with an error code of FLOW_CONTROL_ERROR.

Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are completely asynchronous with respect to each other. This property allows a receiver to aggressively update the window size kept by the sender to prevent streams from stalling.
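The bookkeeping described above can be summarized in a short non-normative sketch; the types and names are ours and concurrency concerns are ignored.

package h2sketch

import "errors"

// sendWindows tracks the two windows a sender maintains for one stream: the
// stream window and the connection window shared across all streams.
type sendWindows struct {
	stream int64
	conn   *int64 // shared across all streams on the connection
}

// consume checks both windows before a flow controlled frame of length n is
// sent, then charges n against each of them.
func (w *sendWindows) consume(n int64) error {
	if n > w.stream || n > *w.conn {
		return errors.New("would exceed an advertised flow control window")
	}
	w.stream -= n
	*w.conn -= n
	return nil
}

// release applies a WINDOW_UPDATE increment, enforcing the 2^31-1 ceiling.
func release(window *int64, increment uint32) error {
	*window += int64(increment)
	if *window > 1<<31-1 {
		return errors.New("FLOW_CONTROL_ERROR: window exceeds 2^31-1")
	}
	return nil
}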
- -
- - When an HTTP/2 connection is first established, new streams are created with an initial - flow control window size of 65,535 octets. The connection flow control window is 65,535 - octets. Both endpoints can adjust the initial window size for new streams by including - a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS - frame that forms part of the connection preface. The connection flow control window can - only be changed using WINDOW_UPDATE frames. - - - Prior to receiving a SETTINGS frame that sets a value for - SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default - initial window size when sending flow controlled frames. Similarly, the connection flow - control window is set to the default initial window size until a WINDOW_UPDATE frame is - received. - - - A SETTINGS frame can alter the initial flow control window size for all - current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, - a receiver MUST adjust the size of all stream flow control windows that it maintains by - the difference between the new value and the old value. - - - A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in - a flow control window to become negative. A sender MUST track the negative flow control - window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE - frames that cause the flow control window to become positive. - - - For example, if the client sends 60KB immediately on connection establishment, and the - server sets the initial window size to be 16KB, the client will recalculate the - available flow control window to be -44KB on receipt of the SETTINGS - frame. The client retains a negative flow control window until WINDOW_UPDATE frames - restore the window to being positive, after which the client can resume sending. - - - A SETTINGS frame cannot alter the connection flow control window. - - - An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that - causes any flow control window to exceed the maximum size as a connection error of type - FLOW_CONTROL_ERROR. - -
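A non-normative sketch of the required adjustment when SETTINGS_INITIAL_WINDOW_SIZE changes; the map of per-stream windows is a hypothetical representation.

package h2sketch

// applyInitialWindowChange adjusts every stream send window by the difference
// between the new and old SETTINGS_INITIAL_WINDOW_SIZE, as required above.
// Windows may become negative; the sender simply does not transmit flow
// controlled frames on those streams until WINDOW_UPDATE frames bring them
// positive again. The connection window is deliberately left untouched.
func applyInitialWindowChange(streamWindows map[uint32]*int64, oldSize, newSize uint32) {
	delta := int64(newSize) - int64(oldSize)
	for _, w := range streamWindows {
		*w += delta
	}
}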
- -
- - A receiver that wishes to use a smaller flow control window than the current size can - send a new SETTINGS frame. However, the receiver MUST be prepared to - receive data that exceeds this window size, since the sender might send data that - exceeds the lower limit prior to processing the SETTINGS frame. - - - After sending a SETTINGS frame that reduces the initial flow control window size, a - receiver has two options for handling streams that exceed flow control limits: - - - The receiver can immediately send RST_STREAM with - FLOW_CONTROL_ERROR error code for the affected streams. - - - The receiver can accept the streams and tolerate the resulting head of line - blocking, sending WINDOW_UPDATE frames as it consumes data. - - - -
-
- -
- - The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can - be sent on an existing stream, as long as the preceding frame is on the same stream and is - a HEADERS, PUSH_PROMISE or CONTINUATION frame without the - END_HEADERS flag set. - - -
- -
- - The CONTINUATION frame payload contains a header block - fragment. - - - - The CONTINUATION frame defines the following flag: - - - - Bit 3 being set indicates that this frame ends a header - block. - - - If the END_HEADERS bit is not set, this frame MUST be followed by another - CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or - a frame on a different stream as a connection - error of type PROTOCOL_ERROR. - - - - - - - The CONTINUATION frame changes the connection state as defined in . - - - - CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received - whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - - - A CONTINUATION frame MUST be preceded by a HEADERS, - PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A - recipient that observes violation of this rule MUST respond with a connection error of type - PROTOCOL_ERROR. - -
-
- -
Error codes are 32-bit fields that are used in RST_STREAM and GOAWAY frames to convey the reasons for the stream or connection error.

Error codes share a common code space. Some error codes apply only to either streams or the entire connection and have no defined semantics in the other context.

The following error codes are defined:

NO_ERROR (0x0): The associated condition is not a result of an error. For example, a GOAWAY might include this code to indicate graceful shutdown of a connection.

PROTOCOL_ERROR (0x1): The endpoint detected an unspecific protocol error. This error is for use when a more specific error code is not available.

INTERNAL_ERROR (0x2): The endpoint encountered an unexpected internal error.

FLOW_CONTROL_ERROR (0x3): The endpoint detected that its peer violated the flow control protocol.

SETTINGS_TIMEOUT (0x4): The endpoint sent a SETTINGS frame, but did not receive a response in a timely manner. See Settings Synchronization.

STREAM_CLOSED (0x5): The endpoint received a frame after a stream was half closed.

FRAME_SIZE_ERROR (0x6): The endpoint received a frame with an invalid size.

REFUSED_STREAM (0x7): The endpoint refuses the stream prior to performing any application processing, see for details.

CANCEL (0x8): Used by the endpoint to indicate that the stream is no longer needed.

COMPRESSION_ERROR (0x9): The endpoint is unable to maintain the header compression context for the connection.

CONNECT_ERROR (0xa): The connection established in response to a CONNECT request was reset or abnormally closed.

ENHANCE_YOUR_CALM (0xb): The endpoint detected that its peer is exhibiting a behavior that might be generating excessive load.

INADEQUATE_SECURITY (0xc): The underlying transport has properties that do not meet minimum security requirements (see ).

Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be treated by an implementation as being equivalent to INTERNAL_ERROR.
- -
HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means that, from the application perspective, the features of the protocol are largely unchanged. To achieve this, all request and response semantics are preserved, although the syntax of conveying those semantics has changed.

Thus, the specification and requirements of HTTP/1.1 Semantics and Content, Conditional Requests, Range Requests, Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax and Routing, such as the HTTP and HTTPS URI schemes, are also applicable in HTTP/2, but the expression of those semantics for this protocol is defined in the sections below.
- - A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on - the same stream as the request. - - - An HTTP message (request or response) consists of: - - - for a response only, zero or more HEADERS frames (each followed by zero - or more CONTINUATION frames) containing the message headers of - informational (1xx) HTTP responses (see and ), - and - - - one HEADERS frame (followed by zero or more CONTINUATION - frames) containing the message headers (see ), and - - - zero or more DATA frames containing the message payload (see ), and - - - optionally, one HEADERS frame, followed by zero or more - CONTINUATION frames containing the trailer-part, if present (see ). - - - The last frame in the sequence bears an END_STREAM flag, noting that a - HEADERS frame bearing the END_STREAM flag can be followed by - CONTINUATION frames that carry any remaining portions of the header block. - - - Other frames (from any stream) MUST NOT occur between either HEADERS frame - and any CONTINUATION frames that might follow. - - - - Trailing header fields are carried in a header block that also terminates the stream. - That is, a sequence starting with a HEADERS frame, followed by zero or more - CONTINUATION frames, where the HEADERS frame bears an - END_STREAM flag. Header blocks after the first that do not terminate the stream are not - part of an HTTP request or response. - - - A HEADERS frame (and associated CONTINUATION frames) can - only appear at the start or end of a stream. An endpoint that receives a - HEADERS frame without the END_STREAM flag set after receiving a final - (non-informational) status code MUST treat the corresponding request or response as malformed. - - - - An HTTP request/response exchange fully consumes a single stream. A request starts with - the HEADERS frame that puts the stream into an "open" state. The request - ends with a frame bearing END_STREAM, which causes the stream to become "half closed - (local)" for the client and "half closed (remote)" for the server. A response starts with - a HEADERS frame and ends with a frame bearing END_STREAM, which places the - stream in the "closed" state. - - - -
- - HTTP/2 removes support for the 101 (Switching Protocols) informational status code - (). - - - The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. - Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate - their use (see ). - -
- -
- - HTTP header fields carry information as a series of key-value pairs. For a listing of - registered HTTP headers, see the Message Header Field Registry maintained at . - - -
- - While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the - status code for the response, HTTP/2 uses special pseudo-header fields beginning with - ':' character (ASCII 0x3a) for this purpose. - - - Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate - pseudo-header fields other than those defined in this document. - - - Pseudo-header fields are only valid in the context in which they are defined. - Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header - fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST - NOT appear in trailers. Endpoints MUST treat a request or response that contains - undefined or invalid pseudo-header fields as malformed. - - - Just as in HTTP/1.x, header field names are strings of ASCII characters that are - compared in a case-insensitive fashion. However, header field names MUST be converted - to lowercase prior to their encoding in HTTP/2. A request or response containing - uppercase header field names MUST be treated as malformed. - - - All pseudo-header fields MUST appear in the header block before regular header fields. - Any request or response that contains a pseudo-header field that appears in a header - block after a regular header field MUST be treated as malformed. - -
- -
HTTP/2 does not use the Connection header field to indicate connection-specific header fields; in this protocol, connection-specific metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message containing connection-specific header fields; any message containing connection-specific header fields MUST be treated as malformed.

This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need to remove any header fields nominated by the Connection header field, along with the Connection header field itself. Such intermediaries SHOULD also remove other connection-specific header fields, such as Keep-Alive, Proxy-Connection, Transfer-Encoding and Upgrade, even if they are not nominated by Connection.

One exception to this is the TE header field, which MAY be present in an HTTP/2 request, but when it is present, it MUST NOT contain any value other than "trailers".

HTTP/2 purposefully does not support upgrade to another protocol. The handshake methods described in are believed sufficient to negotiate the use of alternative protocols.
- -
- - The following pseudo-header fields are defined for HTTP/2 requests: - - - - The :method pseudo-header field includes the HTTP - method (). - - - - - The :scheme pseudo-header field includes the scheme - portion of the target URI (). - - - :scheme is not restricted to http and https schemed URIs. A - proxy or gateway can translate requests for non-HTTP schemes, enabling the use - of HTTP to interact with non-HTTP services. - - - - - The :authority pseudo-header field includes the - authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http - or https schemed URIs. - - - To ensure that the HTTP/1.1 request line can be reproduced accurately, this - pseudo-header field MUST be omitted when translating from an HTTP/1.1 request - that has a request target in origin or asterisk form (see ). Clients that generate - HTTP/2 requests directly SHOULD use the :authority pseudo-header - field instead of the Host header field. An - intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by - copying the value of the :authority pseudo-header - field. - - - - - The :path pseudo-header field includes the path and - query parts of the target URI (the path-absolute - production from and optionally a '?' character - followed by the query production, see and ). A request in asterisk form includes the value '*' for the - :path pseudo-header field. - - - This pseudo-header field MUST NOT be empty for http - or https URIs; http or - https URIs that do not contain a path component - MUST include a value of '/'. The exception to this rule is an OPTIONS request - for an http or https - URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). - - - - - - All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory - pseudo-header fields is malformed. - - - HTTP/2 does not define a way to carry the version identifier that is included in the - HTTP/1.1 request line. - -
- -
- - For HTTP/2 responses, a single :status pseudo-header - field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all - responses, otherwise the response is malformed. - - - HTTP/2 does not define a way to carry the version or reason phrase that is included in - an HTTP/1.1 status line. - -
- -
- - The Cookie header field can carry a significant amount of - redundant data. - - - The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). - This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from - being separated into different name-value pairs. This can significantly reduce - compression efficiency as individual cookie-pairs are updated. - - - To allow for better compression efficiency, the Cookie header field MAY be split into - separate header fields, each with one or more cookie-pairs. If there are multiple - Cookie header fields after decompression, these MUST be concatenated into a single - octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") - before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a - generic HTTP server application. - -
Therefore, the following two lists of Cookie header fields are semantically equivalent:

  cookie: a=b; c=d; e=f

  cookie: a=b
  cookie: c=d
  cookie: e=f
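A non-normative sketch of the recombination step described above (the split direction is simply the inverse); the helper name is ours.

package h2sketch

import "strings"

// joinCookieCrumbs rebuilds a single Cookie header field from crumbs that
// were split apart for better compression, using the two-octet delimiter
// "; " required before handing the message to a non-HTTP/2 context.
func joinCookieCrumbs(crumbs []string) string {
	return strings.Join(crumbs, "; ")
}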
-
- -
A malformed request or response is one that is an otherwise valid sequence of HTTP/2 frames, but is invalid due to the presence of extraneous frames, prohibited header fields, the absence of mandatory header fields, or the inclusion of uppercase header field names.

A request or response that includes an entity body can include a content-length header field. A request or response is also malformed if the value of a content-length header field does not equal the sum of the DATA frame payload lengths that form the body. A response that is defined to have no payload, as described in , can have a non-zero content-length header field, even though no content is included in DATA frames.

Intermediaries that process HTTP requests or responses (i.e., any intermediary not acting as a tunnel) MUST NOT forward a malformed request or response. Malformed requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR.

For malformed requests, a server MAY send an HTTP response prior to closing or resetting the stream. Clients MUST NOT accept a malformed response. Note that these requirements are intended to protect against several types of common attacks against HTTP; they are deliberately strict, because being permissive can expose implementations to these vulnerabilities.
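A non-normative sketch of the content-length check described above; the helper is illustrative only.

package h2sketch

// contentLengthMatches reports whether a message whose header block declared
// content-length `declared` is well formed with respect to its body: the
// DATA frame payload lengths must sum exactly to the declared value. A
// mismatch makes the request or response malformed (stream error of type
// PROTOCOL_ERROR).
func contentLengthMatches(declared int64, dataPayloadLens []int) bool {
	var sum int64
	for _, l := range dataPayloadLens {
		sum += int64(l)
	}
	return sum == declared
}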
-
- -
- - This section shows HTTP/1.1 requests and responses, with illustrations of equivalent - HTTP/2 requests and responses. - - - An HTTP GET request includes request header fields and no body and is therefore - transmitted as a single HEADERS frame, followed by zero or more - CONTINUATION frames containing the serialized block of request header - fields. The HEADERS frame in the following has both the END_HEADERS and - END_STREAM flags set; no CONTINUATION frames are sent: - - -
  GET /resource HTTP/1.1           HEADERS
  Host: example.org          ==>     + END_STREAM
  Accept: image/jpeg                 + END_HEADERS
                                       :method = GET
                                       :scheme = https
                                       :path = /resource
                                       host = example.org
                                       accept = image/jpeg
- - - Similarly, a response that includes only response header fields is transmitted as a - HEADERS frame (again, followed by zero or more - CONTINUATION frames) containing the serialized block of response header - fields. - - -
  HTTP/1.1 304 Not Modified        HEADERS
  ETag: "xyzzy"              ==>     + END_STREAM
  Expires: Thu, 23 Jan ...           + END_HEADERS
                                       :status = 304
                                       etag = "xyzzy"
                                       expires = Thu, 23 Jan ...
- - - An HTTP POST request that includes request header fields and payload data is transmitted - as one HEADERS frame, followed by zero or more - CONTINUATION frames containing the request header fields, followed by one - or more DATA frames, with the last CONTINUATION (or - HEADERS) frame having the END_HEADERS flag set and the final - DATA frame having the END_STREAM flag set: - - -
  POST /resource HTTP/1.1          HEADERS
  Host: example.org          ==>     - END_STREAM
  Content-Type: image/jpeg           - END_HEADERS
  Content-Length: 123                  :method = POST
                                       :path = /resource
  {binary data}                        :scheme = https

                                   CONTINUATION
                                     + END_HEADERS
                                       content-type = image/jpeg
                                       host = example.org
                                       content-length = 123

                                   DATA
                                     + END_STREAM
                                   {binary data}

Note that data contributing to any given header field could be spread between header block fragments. The allocation of header fields to frames in this example is illustrative only.
- - - A response that includes header fields and payload data is transmitted as a - HEADERS frame, followed by zero or more CONTINUATION - frames, followed by one or more DATA frames, with the last - DATA frame in the sequence having the END_STREAM flag set: - - -
  HTTP/1.1 200 OK                  HEADERS
  Content-Type: image/jpeg   ==>     - END_STREAM
  Content-Length: 123                + END_HEADERS
                                       :status = 200
  {binary data}                        content-type = image/jpeg
                                       content-length = 123

                                   DATA
                                     + END_STREAM
                                   {binary data}
- - - Trailing header fields are sent as a header block after both the request or response - header block and all the DATA frames have been sent. The - HEADERS frame starting the trailers header block has the END_STREAM flag - set. - - -
  HTTP/1.1 200 OK                  HEADERS
  Content-Type: image/jpeg   ==>     - END_STREAM
  Transfer-Encoding: chunked         + END_HEADERS
  Trailer: Foo                         :status = 200
                                       content-length = 123
  123                                  content-type = image/jpeg
  {binary data}                        trailer = Foo
  0
  Foo: bar                         DATA
                                     - END_STREAM
                                   {binary data}

                                   HEADERS
                                     + END_STREAM
                                     + END_HEADERS
                                       foo = bar
- - -
An informational response using a 1xx status code other than 101 is transmitted as a HEADERS frame, followed by zero or more CONTINUATION frames:

  HTTP/1.1 103 BAR                 HEADERS
  Extension-Field: bar       ==>     - END_STREAM
                                     + END_HEADERS
                                       :status = 103
                                       extension-field = bar
-
- -
- - In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error - occurs, because there is no means to determine the nature of the error. It is possible - that some server processing occurred prior to the error, which could result in - undesirable effects if the request were reattempted. - - - HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has - not been processed: - - - The GOAWAY frame indicates the highest stream number that might have - been processed. Requests on streams with higher numbers are therefore guaranteed to - be safe to retry. - - - The REFUSED_STREAM error code can be included in a - RST_STREAM frame to indicate that the stream is being closed prior to - any processing having occurred. Any request that was sent on the reset stream can - be safely retried. - - - - - Requests that have not been processed have not failed; clients MAY automatically retry - them, even those with non-idempotent methods. - - - A server MUST NOT indicate that a stream has not been processed unless it can guarantee - that fact. If frames that are on a stream are passed to the application layer for any - stream, then REFUSED_STREAM MUST NOT be used for that stream, and a - GOAWAY frame MUST include a stream identifier that is greater than or - equal to the given stream identifier. - - - In addition to these mechanisms, the PING frame provides a way for a - client to easily test a connection. Connections that remain idle can become broken as - some middleboxes (for instance, network address translators, or load balancers) silently - discard connection bindings. The PING frame allows a client to safely - test whether a connection is still active without sending a request. - -
-
- -
- - HTTP/2 allows a server to pre-emptively send (or "push") responses (along with - corresponding "promised" requests) to a client in association with a previous - client-initiated request. This can be useful when the server knows the client will need - to have those responses available in order to fully process the response to the original - request. - - - - Pushing additional message exchanges in this fashion is optional, and is negotiated - between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set - to 0 to indicate that server push is disabled. - - - Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a - promised request that is not cacheable, unsafe or that includes a request body MUST - reset the stream with a stream error of type - PROTOCOL_ERROR. - - - Pushed responses that are cacheable (see ) can be stored by the client, if it implements a HTTP - cache. Pushed responses are considered successfully validated on the origin server (e.g., - if the "no-cache" cache response directive is present) while the stream identified by the - promised stream ID is still open. - - - Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY - be made available to the application separately. - - - An intermediary can receive pushes from the server and choose not to forward them on to - the client. In other words, how to make use of the pushed information is up to that - intermediary. Equally, the intermediary might choose to make additional pushes to the - client, without any action taken by the server. - - - A client cannot push. Thus, servers MUST treat the receipt of a - PUSH_PROMISE frame as a connection - error of type PROTOCOL_ERROR. Clients MUST reject any attempt to - change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating - the message as a connection error of type - PROTOCOL_ERROR. - - -
- - Server push is semantically equivalent to a server responding to a request; however, in - this case that request is also sent by the server, as a PUSH_PROMISE - frame. - - - The PUSH_PROMISE frame includes a header block that contains a complete - set of request header fields that the server attributes to the request. It is not - possible to push a response to a request that includes a request body. - - - - Pushed responses are always associated with an explicit request from the client. The - PUSH_PROMISE frames sent by the server are sent on that explicit - request's stream. The PUSH_PROMISE frame also includes a promised stream - identifier, chosen from the stream identifiers available to the server (see ). - - - - The header fields in PUSH_PROMISE and any subsequent - CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in - the :method header field that is safe and cacheable. If a - client receives a PUSH_PROMISE that does not include a complete and valid - set of header fields, or the :method header field identifies - a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. - - - - The server SHOULD send PUSH_PROMISE () - frames prior to sending any frames that reference the promised responses. This avoids a - race where clients issue requests prior to receiving any PUSH_PROMISE - frames. - - - For example, if the server receives a request for a document containing embedded links - to multiple image files, and the server chooses to push those additional images to the - client, sending push promises before the DATA frames that contain the - image links ensures that the client is able to see the promises before discovering - embedded links. Similarly, if the server pushes responses referenced by the header block - (for instance, in Link header fields), sending the push promises before sending the - header block ensures that clients do not request them. - - - - PUSH_PROMISE frames MUST NOT be sent by the client. - - - PUSH_PROMISE frames can be sent by the server in response to any - client-initiated stream, but the stream MUST be in either the "open" or "half closed - (remote)" state with respect to the server. PUSH_PROMISE frames are - interspersed with the frames that comprise a response, though they cannot be - interspersed with HEADERS and CONTINUATION frames that - comprise a single header block. - - - Sending a PUSH_PROMISE frame creates a new stream and puts the stream - into the “reserved (local)” state for the server and the “reserved (remote)” state for - the client. - -
- -
- - After sending the PUSH_PROMISE frame, the server can begin delivering the - pushed response as a response on a server-initiated - stream that uses the promised stream identifier. The server uses this stream to - transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" - to the client after the initial HEADERS frame is sent. - - - - Once a client receives a PUSH_PROMISE frame and chooses to accept the - pushed response, the client SHOULD NOT issue any requests for the promised response - until after the promised stream has closed. - - - - If the client determines, for any reason, that it does not wish to receive the pushed - response from the server, or if the server takes too long to begin sending the promised - response, the client can send an RST_STREAM frame, using either the - CANCEL or REFUSED_STREAM codes, and referencing the pushed - stream's identifier. - - - A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the - number of responses that can be concurrently pushed by a server. Advertising a - SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by - preventing the server from creating the necessary streams. This does not prohibit a - server from sending PUSH_PROMISE frames; clients need to reset any - promised streams that are not wanted. - - - - Clients receiving a pushed response MUST validate that either the server is - authoritative (see ), or the proxy that provided the pushed - response is configured for the corresponding request. For example, a server that offers - a certificate for only the example.com DNS-ID or Common Name - is not permitted to push a response for https://www.example.org/doc. - - - The response for a PUSH_PROMISE stream begins with a - HEADERS frame, which immediately puts the stream into the “half closed - (remote)” state for the server and “half closed (local)” state for the client, and ends - with a frame bearing END_STREAM, which places the stream in the "closed" state. - - - The client never sends a frame with the END_STREAM flag for a server push. - - - -
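A client that decides it does not want a promised response can act on the RST_STREAM guidance above. A minimal sketch, assuming the Framer, WriteRSTStream, and ErrCodeCancel names from the vendored http2 package removed in this diff:

package example

import "github.com/bradfitz/http2"

// refusePush declines a pushed response by resetting the promised stream with
// the CANCEL error code; REFUSED_STREAM would also be acceptable per the text
// above.
func refusePush(fr *http2.Framer, promisedStreamID uint32) error {
	return fr.WriteRSTStream(promisedStreamID, http2.ErrCodeCancel)
}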
- -
- -
- - In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. - CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin - server for the purposes of interacting with https resources. - - - In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to - a remote host, for similar purposes. The HTTP header field mapping works as defined in - Request Header Fields, with a few - differences. Specifically: - - - The :method header field is set to CONNECT. - - - The :scheme and :path header - fields MUST be omitted. - - - The :authority header field contains the host and port to - connect to (equivalent to the authority-form of the request-target of CONNECT - requests, see ). - - - - - A proxy that supports CONNECT establishes a TCP connection to - the server identified in the :authority header field. Once - this connection is successfully established, the proxy sends a HEADERS - frame containing a 2xx series status code to the client, as defined in . - - - After the initial HEADERS frame sent by each peer, all subsequent - DATA frames correspond to data sent on the TCP connection. The payload of - any DATA frames sent by the client is transmitted by the proxy to the TCP - server; data received from the TCP server is assembled into DATA frames by - the proxy. Frame types other than DATA or stream management frames - (RST_STREAM, WINDOW_UPDATE, and PRIORITY) - MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. - - - The TCP connection can be closed by either peer. The END_STREAM flag on a - DATA frame is treated as being equivalent to the TCP FIN bit. A client is - expected to send a DATA frame with the END_STREAM flag set after receiving - a frame bearing the END_STREAM flag. A proxy that receives a DATA frame - with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP - segment. A proxy that receives a TCP segment with the FIN bit set sends a - DATA frame with the END_STREAM flag set. Note that the final TCP segment - or DATA frame could be empty. - - - A TCP connection error is signaled with RST_STREAM. A proxy treats any - error in the TCP connection, which includes receiving a TCP segment with the RST bit set, - as a stream error of type - CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the - RST bit set if it detects an error with the stream or the HTTP/2 connection. - -
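The CONNECT header field mapping above can be illustrated with a short sketch that encodes only :method and :authority (no :scheme or :path), assuming the hpack encoder API from the vendored package this diff removes:

package example

import (
	"bytes"

	"github.com/bradfitz/http2/hpack"
)

// connectHeaderBlock builds the HPACK-encoded header block for a CONNECT
// request: :method and :authority are present, while :scheme and :path are
// deliberately omitted, per the mapping described above.
func connectHeaderBlock(hostport string) ([]byte, error) {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "CONNECT"},
		{Name: ":authority", Value: hostport}, // e.g. "example.com:443"
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}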
-
- -
- - This section outlines attributes of the HTTP protocol that improve interoperability, reduce - exposure to known security vulnerabilities, or reduce the potential for implementation - variation. - - -
- - HTTP/2 connections are persistent. For best performance, it is expected clients will not - close connections until it is determined that no further communication with a server is - necessary (for example, when a user navigates away from a particular web page), or until - the server closes the connection. - - - Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, - where host is derived from a URI, a selected alternative - service, or a configured proxy. - - - A client can create additional connections as replacements, either to replace connections - that are near to exhausting the available stream - identifier space, to refresh the keying material for a TLS connection, or to - replace connections that have encountered errors. - - - A client MAY open multiple connections to the same IP address and TCP port using different - Server Name Indication values or to provide different TLS - client certificates, but SHOULD avoid creating multiple connections with the same - configuration. - - - Servers are encouraged to maintain open connections for as long as possible, but are - permitted to terminate idle connections if necessary. When either endpoint chooses to - close the transport-layer TCP connection, the terminating endpoint SHOULD first send a - GOAWAY () frame so that both endpoints can reliably - determine whether previously sent frames have been processed and gracefully complete or - terminate any necessary remaining tasks. - - -
-
- Connections that are made to an origin server, either directly or through a tunnel
- created using the CONNECT method, MAY be reused for
- requests with multiple different URI authority components. A connection can be reused
- as long as the origin server is authoritative. For
- http resources, this depends on the host having resolved to
- the same IP address.
-
- For https resources, connection reuse additionally depends
- on having a certificate that is valid for the host in the URI. An origin server might
- offer a certificate with multiple subjectAltName attributes,
- or names with wildcards, one of which is valid for the authority in the URI. For
- example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for
- requests to URIs starting with https://a.example.com/ and
- https://b.example.com/.
-
- In some deployments, reusing a connection for multiple origins can result in requests
- being directed to the wrong origin server. For example, TLS termination might be
- performed by a middlebox that uses the TLS Server Name Indication
- (SNI) extension to select an origin server. This means that it is possible
- for clients to send confidential information to servers that might not be the intended
- target for the request, even though the server is otherwise authoritative.
-
- A server that does not wish clients to reuse connections can indicate that it is not
- authoritative for a request by sending a 421 (Misdirected Request) status code in response
- to the request (see ).
-
- A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
- through a single connection. That is, all requests sent via a proxy reuse the
- connection to the proxy.
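For the https part of the reuse rule above, a Go client could gate coalescing on certificate validity for the new authority. This sketch covers only the certificate condition (the http case, which depends on DNS resolution, is not shown), using standard crypto/tls behaviour:

package example

import "crypto/tls"

// canReuseForAuthority reports whether an existing TLS connection may be
// reused for another URI authority: the certificate presented on that
// connection (including any wildcard subjectAltName entries) must be valid
// for the new host.
func canReuseForAuthority(cs tls.ConnectionState, host string) bool {
	if len(cs.PeerCertificates) == 0 {
		return false
	}
	return cs.PeerCertificates[0].VerifyHostname(host) == nil
}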
- -
- - The 421 (Misdirected Request) status code indicates that the request was directed at a - server that is not able to produce a response. This can be sent by a server that is not - configured to produce responses for the combination of scheme and authority that are - included in the request URI. - - - Clients receiving a 421 (Misdirected Request) response from a server MAY retry the - request - whether the request method is idempotent or not - over a different connection. - This is possible if a connection is reused () or if an alternative - service is selected (). - - - This status code MUST NOT be generated by proxies. - - - A 421 response is cacheable by default; i.e., unless otherwise indicated by the method - definition or explicit cache controls (see ). - -
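An origin server can apply the 421 rule above with an ordinary net/http handler. A hypothetical sketch (the allowed-authority map and handler name are illustrative):

package example

import "net/http"

// misdirectedGuard wraps a handler and answers 421 (Misdirected Request) for
// any request whose authority this server is not configured to serve.
func misdirectedGuard(allowed map[string]bool, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !allowed[r.Host] {
			http.Error(w, "Misdirected Request", 421)
			return
		}
		next.ServeHTTP(w, r)
	})
}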
-
- -
- - Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over - TLS. The general TLS usage guidance in SHOULD be followed, with - some additional restrictions that are specific to HTTP/2. - - - - An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on - feature set and cipher suite described in this section. Due to implementation - limitations, it might not be possible to fail TLS negotiation. An endpoint MUST - immediately terminate an HTTP/2 connection that does not meet these minimum requirements - with a connection error of type - INADEQUATE_SECURITY. - - -
- - The TLS implementation MUST support the Server Name Indication - (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when - negotiating TLS. - - - The TLS implementation MUST disable compression. TLS compression can lead to the - exposure of information that would not otherwise be revealed . - Generic compression is unnecessary since HTTP/2 provides compression features that are - more aware of context and therefore likely to be more appropriate for use for - performance, security or other reasons. - - - The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS - renegotiation as a connection error of type - PROTOCOL_ERROR. Note that disabling renegotiation can result in - long-lived connections becoming unusable due to limits on the number of messages the - underlying cipher suite can encipher. - - - A client MAY use renegotiation to provide confidentiality protection for client - credentials offered in the handshake, but any renegotiation MUST occur prior to sending - the connection preface. A server SHOULD request a client certificate if it sees a - renegotiation request immediately after establishing a connection. - - - This effectively prevents the use of renegotiation in response to a request for a - specific protected resource. A future specification might provide a way to support this - use case. - -
- -
-
- The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
- only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST
- have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
- Clients MUST accept DHE sizes of up to 4096 bits. HTTP MUST NOT be used with cipher
- suites that use stream or block ciphers. Authenticated Encryption with Additional Data
- (AEAD) modes, such as the Galois/Counter Mode (GCM) for
- AES, are acceptable.
-
- The effect of these restrictions is that TLS 1.2 implementations could have
- non-intersecting sets of available cipher suites, since these prevent the use of the
- cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
- HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 .
-
- Clients MAY advertise support of cipher suites that are prohibited by the above
- restrictions in order to allow for connection to servers that do not support HTTP/2.
- This enables a fallback to protocols without these constraints without the additional
- latency imposed by using a separate connection for fallback.
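In Go, these TLS requirements translate fairly directly into a crypto/tls configuration. The sketch below is one possible client-side setup: TLS 1.2 minimum, ECDHE+AEAD suites including the mandatory TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, P-256, SNI via ServerName, and "h2" offered through ALPN. Go's crypto/tls does not implement TLS compression, so that prohibition is met implicitly.

package example

import "crypto/tls"

// strictHTTP2TLSConfig returns a client TLS configuration in line with the
// restrictions above.
func strictHTTP2TLSConfig(serverName string) *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		},
		CurvePreferences: []tls.CurveID{tls.CurveP256},
		NextProtos:       []string{"h2"},
		ServerName:       serverName,
	}
}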
-
-
- -
-
- - HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is - authoritative in providing a given response, see . This relies on local name resolution for the "http" - URI scheme, and the authenticated server identity for the "https" scheme (see ). - -
- -
-
- In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
- protocol toward a server that understands a different protocol. An attacker might be able
- to cause the transaction to appear as a valid transaction in the second protocol. In
- combination with the capabilities of the web context, this can be used to interact with
- poorly protected servers in private networks.
-
- Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
- protection against cross-protocol attacks. ALPN provides a positive indication that a
- server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
- protocols.
-
- The encryption in TLS makes it difficult for attackers to control the data which could be
- used in a cross-protocol attack on a cleartext protocol.
-
- The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
- The connection preface contains a string that is
- designed to confuse HTTP/1.1 servers, but no special protection is offered for other
- protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
- Upgrade header field in addition to the client connection preface could be exposed to a
- cross-protocol attack.
- -
- - HTTP/2 header field names and values are encoded as sequences of octets with a length - prefix. This enables HTTP/2 to carry any string of octets as the name or value of a - header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 - directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might - exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal - header fields, extra header fields, or even new messages that are entirely falsified. - - - Header field names or values that contain characters not permitted by HTTP/1.1, including - carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an - intermediary, as stipulated in . - - - Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. - Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. - -
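An intermediary translating to HTTP/1.1 can enforce the rule above with a simple octet check before copying a field value; this standalone sketch rejects CR and LF (and, conservatively, NUL):

package example

// validForHTTP1 reports whether a header field value may be written verbatim
// into an HTTP/1.1 message: carriage return (0x0d), line feed (0x0a), and NUL
// (0x00) would allow header or message splitting and are rejected.
func validForHTTP1(value string) bool {
	for i := 0; i < len(value); i++ {
		switch value[i] {
		case '\r', '\n', 0x00:
			return false
		}
	}
	return true
}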
- -
- - Pushed responses do not have an explicit request from the client; the request - is provided by the server in the PUSH_PROMISE frame. - - - Caching responses that are pushed is possible based on the guidance provided by the origin - server in the Cache-Control header field. However, this can cause issues if a single - server hosts more than one tenant. For example, a server might offer multiple users each - a small portion of its URI space. - - - Where multiple tenants share space on the same server, that server MUST ensure that - tenants are not able to push representations of resources that they do not have authority - over. Failure to enforce this would allow a tenant to provide a representation that would - be served out of cache, overriding the actual representation that the authoritative tenant - provides. - - - Pushed responses for which an origin server is not authoritative (see - ) are never cached or used. - -
- -
- - An HTTP/2 connection can demand a greater commitment of resources to operate than a - HTTP/1.1 connection. The use of header compression and flow control depend on a - commitment of resources for storing a greater amount of state. Settings for these - features ensure that memory commitments for these features are strictly bounded. - - - The number of PUSH_PROMISE frames is not constrained in the same fashion. - A client that accepts server push SHOULD limit the number of streams it allows to be in - the "reserved (remote)" state. Excessive number of server push streams can be treated as - a stream error of type - ENHANCE_YOUR_CALM. - - - Processing capacity cannot be guarded as effectively as state capacity. - - - The SETTINGS frame can be abused to cause a peer to expend additional - processing time. This might be done by pointlessly changing SETTINGS parameters, setting - multiple undefined parameters, or changing the same setting multiple times in the same - frame. WINDOW_UPDATE or PRIORITY frames can be abused to - cause an unnecessary waste of resources. - - - Large numbers of small or empty frames can be abused to cause a peer to expend time - processing frame headers. Note however that some uses are entirely legitimate, such as - the sending of an empty DATA frame to end a stream. - - - Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. - - - Limits in SETTINGS parameters cannot be reduced instantaneously, which - leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In - particular, immediately after establishing a connection, limits set by a server are not - known to clients and could be exceeded without being an obvious protocol violation. - - - All these features - i.e., SETTINGS changes, small frames, header - compression - have legitimate uses. These features become a burden only when they are - used unnecessarily or to excess. - - - An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of - service attack. Implementations SHOULD track the use of these features and set limits on - their use. An endpoint MAY treat activity that is suspicious as a connection error of type - ENHANCE_YOUR_CALM. - - -
-
- A large header block can cause an implementation to
- commit a large amount of state. Header fields that are critical for routing can appear
- toward the end of a header block, which prevents streaming of header fields to their
- ultimate destination. For these and other reasons, such as ensuring cache correctness,
- an endpoint might need to buffer the entire header block. Since there is no
- hard limit to the size of a header block, some endpoints could be forced to commit a large
- amount of available memory for header fields.
-
- An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of
- limits that might apply on the size of header blocks. This setting is only advisory, so
- endpoints MAY choose to send header blocks that exceed this limit and risk having the
- request or response treated as malformed. This setting is specific to a connection,
- so any request or response could encounter a hop with a lower, unknown limit. An
- intermediary can attempt to avoid this problem by passing on values presented by
- different peers, but they are not obligated to do so.
-
- A server that receives a larger header block than it is willing to handle can send an
- HTTP 431 (Request Header Fields Too Large) status code . A
- client can discard responses that it cannot process. The header block MUST be processed
- to ensure a consistent connection state, unless the connection is closed.
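An endpoint enforcing an advertised SETTINGS_MAX_HEADER_LIST_SIZE might account for header fields as sketched below. The 32-octet per-field overhead is the accounting rule that setting is defined with elsewhere in the specification; it is an assumption here, since that definition is not part of this excerpt.

package example

type headerField struct{ name, value string }

// headerListSize sums the uncompressed size of each field's name and value
// plus an assumed 32-octet per-field overhead.
func headerListSize(fields []headerField) uint32 {
	var n uint32
	for _, f := range fields {
		n += uint32(len(f.name)) + uint32(len(f.value)) + 32
	}
	return n
}

// exceedsHeaderListLimit reports whether a header block is larger than the
// peer's advertised (advisory) limit.
func exceedsHeaderListLimit(fields []headerField, max uint32) bool {
	return headerListSize(fields) > max
}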
-
- -
- - HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover - secret data when it is compressed in the same context as data under attacker control. - - - There are demonstrable attacks on compression that exploit the characteristics of the web - (e.g., ). The attacker induces multiple requests containing - varying plaintext, observing the length of the resulting ciphertext in each, which - reveals a shorter length when a guess about the secret is correct. - - - Implementations communicating on a secure channel MUST NOT compress content that includes - both confidential and attacker-controlled data unless separate compression dictionaries - are used for each source of data. Compression MUST NOT be used if the source of data - cannot be reliably determined. Generic stream compression, such as that provided by TLS - MUST NOT be used with HTTP/2 (). - - - Further considerations regarding the compression of header fields are described in . - -
- -
- - Padding within HTTP/2 is not intended as a replacement for general purpose padding, such - as might be provided by TLS. Redundant padding could even be - counterproductive. Correct application can depend on having specific knowledge of the - data that is being padded. - - - To mitigate attacks that rely on compression, disabling or limiting compression might be - preferable to padding as a countermeasure. - - - Padding can be used to obscure the exact size of frame content, and is provided to - mitigate specific attacks within HTTP. For example, attacks where compressed content - includes both attacker-controlled plaintext and secret data (see for example, ). - - - Use of padding can result in less protection than might seem immediately obvious. At - best, padding only makes it more difficult for an attacker to infer length information by - increasing the number of frames an attacker has to observe. Incorrectly implemented - padding schemes can be easily defeated. In particular, randomized padding with a - predictable distribution provides very little protection; similarly, padding payloads to a - fixed size exposes information as payload sizes cross the fixed size boundary, which could - be possible if an attacker can control plaintext. - - - Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding - for HEADERS and PUSH_PROMISE frames. A valid reason for an - intermediary to change the amount of padding of frames is to improve the protections that - padding provides. - -
- -
- - Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions - of a single client or server over time. This includes the value of settings, the manner - in which flow control windows are managed, the way priorities are allocated to streams, - timing of reactions to stimulus, and handling of any optional features. - - - As far as this creates observable differences in behavior, they could be used as a basis - for fingerprinting a specific client, as defined in . - -
-
- -
- - A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation - (ALPN) Protocol IDs" registry established in . - - - This document establishes a registry for frame types, settings, and error codes. These new - registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. - - - This document registers the HTTP2-Settings header field for - use in HTTP; and the 421 (Misdirected Request) status code. - - - This document registers the PRI method for use in HTTP, to avoid - collisions with the connection preface. - - -
- - This document creates two registrations for the identification of HTTP/2 in the - "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . - - - The "h2" string identifies HTTP/2 when used over TLS: - - HTTP/2 over TLS - 0x68 0x32 ("h2") - This document - - - - The "h2c" string identifies HTTP/2 when used over cleartext TCP: - - HTTP/2 over TCP - 0x68 0x32 0x63 ("h2c") - This document - - -
- -
-
- This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
- Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
- either of the "IETF Review" or "IESG Approval" policies for
- values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
- experimental use.
-
- New entries in this registry require the following information:
-
- A name or label for the frame type.
-
- The 8-bit code assigned to the frame type.
-
- A reference to a specification that includes a description of the frame layout,
- its semantics and flags that the frame type uses, including any parts of the frame
- that are conditionally present based on the value of flags.
-
-
-
- The entries in the following table are registered by this document.
-
- Frame Type
- Code
- Section
- DATA 0x0
- HEADERS 0x1
- PRIORITY 0x2
- RST_STREAM 0x3
- SETTINGS 0x4
- PUSH_PROMISE 0x5
- PING 0x6
- GOAWAY 0x7
- WINDOW_UPDATE 0x8
- CONTINUATION 0x9
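For readers following along in Go, the registered frame type codes from the table above map onto constants like these (the names are illustrative; the vendored http2 package defines its own FrameType values):

package example

// Frame type codes registered by this document.
const (
	frameData         = 0x0
	frameHeaders      = 0x1
	framePriority     = 0x2
	frameRSTStream    = 0x3
	frameSettings     = 0x4
	framePushPromise  = 0x5
	framePing         = 0x6
	frameGoAway       = 0x7
	frameWindowUpdate = 0x8
	frameContinuation = 0x9
)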
- -
-
- This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
- manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to
- 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
-
- New registrations are advised to provide the following information:
-
- A symbolic name for the setting. Specifying a setting name is optional.
-
- The 16-bit code assigned to the setting.
-
- An initial value for the setting.
-
- An optional reference to a specification that describes the use of the setting.
-
-
-
- An initial set of setting registrations can be found in .
-
- Name
- Code
- Initial Value
- Specification
- HEADER_TABLE_SIZE
- 0x1 4096
- ENABLE_PUSH
- 0x2 1
- MAX_CONCURRENT_STREAMS
- 0x3 (infinite)
- INITIAL_WINDOW_SIZE
- 0x4 65535
- MAX_FRAME_SIZE
- 0x5 16384
- MAX_HEADER_LIST_SIZE
- 0x6 (infinite)
-
-
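The initial values in the settings table above can also be read as a code-to-default mapping; settings listed as (infinite) simply have no entry in this illustrative sketch:

package example

// initialSettingValues maps registered setting codes to their initial values.
// MAX_CONCURRENT_STREAMS (0x3) and MAX_HEADER_LIST_SIZE (0x6) are unlimited by
// default and therefore have no entry.
var initialSettingValues = map[uint16]uint32{
	0x1: 4096,  // HEADER_TABLE_SIZE
	0x2: 1,     // ENABLE_PUSH
	0x4: 65535, // INITIAL_WINDOW_SIZE
	0x5: 16384, // MAX_FRAME_SIZE
}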
- -
-
- This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
- registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
- "Expert Review" policy.
-
- Registrations for error codes are required to include a description of the error code. An
- expert reviewer is advised to examine new registrations for possible duplication with
- existing error codes. Use of existing registrations is to be encouraged, but not
- mandated.
-
- New registrations are advised to provide the following information:
-
- A name for the error code. Specifying an error code name is optional.
-
- The 32-bit error code value.
-
- A brief description of the error code semantics, longer if no detailed specification
- is provided.
-
- An optional reference for a specification that defines the error code.
-
-
-
- The entries in the following table are registered by this document.
-
- Name
- Code
- Description
- Specification
- NO_ERROR 0x0
- Graceful shutdown
-
- PROTOCOL_ERROR 0x1
- Protocol error detected
-
- INTERNAL_ERROR 0x2
- Implementation fault
-
- FLOW_CONTROL_ERROR 0x3
- Flow control limits exceeded
-
- SETTINGS_TIMEOUT 0x4
- Settings not acknowledged
-
- STREAM_CLOSED 0x5
- Frame received for closed stream
-
- FRAME_SIZE_ERROR 0x6
- Frame size incorrect
-
- REFUSED_STREAM 0x7
- Stream not processed
-
- CANCEL 0x8
- Stream cancelled
-
- COMPRESSION_ERROR 0x9
- Compression state not updated
-
- CONNECT_ERROR 0xa
- TCP connection error for CONNECT method
-
- ENHANCE_YOUR_CALM 0xb
- Processing capacity exceeded
-
- INADEQUATE_SECURITY 0xc
- Negotiated TLS parameters not acceptable
-
-
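The error code registry above, expressed as Go constants (names are illustrative; the vendored http2 package defines its own ErrCode values):

package example

// Error codes registered by this document; descriptions are in the table above.
const (
	errNoError            = 0x0
	errProtocolError      = 0x1
	errInternalError      = 0x2
	errFlowControlError   = 0x3
	errSettingsTimeout    = 0x4
	errStreamClosed       = 0x5
	errFrameSizeError     = 0x6
	errRefusedStream      = 0x7
	errCancel             = 0x8
	errCompressionError   = 0x9
	errConnectError       = 0xa
	errEnhanceYourCalm    = 0xb
	errInadequateSecurity = 0xc
)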
- -
- - This section registers the HTTP2-Settings header field in the - Permanent Message Header Field Registry. - - - HTTP2-Settings - - - http - - - standard - - - IETF - - - of this document - - - This header field is only used by an HTTP/2 client for Upgrade-based negotiation. - - - -
- -
- - This section registers the PRI method in the HTTP Method - Registry (). - - - PRI - - - No - - - No - - - of this document - - - This method is never used by an actual client. This method will appear to be used - when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection - preface. - - - -
- -
- - This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext - Transfer Protocol (HTTP) Status Code Registry (). - - - - - 421 - - - Misdirected Request - - - of this document - - - -
- -
- -
- - This document includes substantial input from the following individuals: - - - Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin - Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin - Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY - contributors). - - - Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). - - - William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto - Peon, Rob Trace (Flow control). - - - Mike Bishop (Extensibility). - - - Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan - (Substantial editorial contributions). - - - Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. - - - Alexey Melnikov was an editor of this document during 2013. - - - A substantial proportion of Martin's contribution was supported by Microsoft during his - employment there. - - - -
-
- - - - - - HPACK - Header Compression for HTTP/2 - - - - - - - - - - - - Transmission Control Protocol - - - University of Southern California (USC)/Information Sciences - Institute - - - - - - - - - - - Key words for use in RFCs to Indicate Requirement Levels - - - Harvard University -
sob@harvard.edu
-
- -
- - -
- - - - - HTTP Over TLS - - - - - - - - - - Uniform Resource Identifier (URI): Generic - Syntax - - - - - - - - - - - - The Base16, Base32, and Base64 Data Encodings - - - - - - - - - Guidelines for Writing an IANA Considerations Section in RFCs - - - - - - - - - - - Augmented BNF for Syntax Specifications: ABNF - - - - - - - - - - - The Transport Layer Security (TLS) Protocol Version 1.2 - - - - - - - - - - - Transport Layer Security (TLS) Extensions: Extension Definitions - - - - - - - - - - Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension - - - - - - - - - - - - - TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois - Counter Mode (GCM) - - - - - - - - - - - Digital Signature Standard (DSS) - - NIST - - - - - - - - - Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - - Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- -
- - - Hypertext Transfer Protocol (HTTP/1.1): Range Requests - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - World Wide Web Consortium -
ylafon@w3.org
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- -
- - - Hypertext Transfer Protocol (HTTP/1.1): Caching - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - Akamai -
mnot@mnot.net
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - Hypertext Transfer Protocol (HTTP/1.1): Authentication - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - - HTTP State Management Mechanism - - - - - -
- - - - - - TCP Extensions for High Performance - - - - - - - - - - - - Transport Layer Security Protocol Compression Methods - - - - - - - - - Additional HTTP Status Codes - - - - - - - - - - - Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) - - - - - - - - - - - - - - - AES Galois Counter Mode (GCM) Cipher Suites for TLS - - - - - - - - - - - - HTML5 - - - - - - - - - - - Latest version available at - . - - - - - - - Talking to Yourself for Fun and Profit - - - - - - - - - - - - - - BREACH: Reviving the CRIME Attack - - - - - - - - - - - Registration Procedures for Message Header Fields - - Nine by Nine -
GK-IETF@ninebynine.org
-
- - BEA Systems -
mnot@pobox.com
-
- - HP Labs -
JeffMogul@acm.org
-
- -
- - -
- - - - Recommendations for Secure Use of TLS and DTLS - - - - - - - - - - - - - - - - - - HTTP Alternative Services - - - Akamai - - - Mozilla - - - greenbytes - - - - - - -
- -
- - This section is to be removed by RFC Editor before publication. - - -
- - Renamed Not Authoritative status code to Misdirected Request. - -
- -
- - Pseudo-header fields are now required to appear strictly before regular ones. - - - Restored 1xx series status codes, except 101. - - - Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting - to limit the damage. - - - Added a setting to advise peers of header set size limits. - - - Removed segments. - - - Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. - -
- -
- - Restored extensibility options. - - - Restricting TLS cipher suites to AEAD only. - - - Removing Content-Encoding requirements. - - - Permitting the use of PRIORITY after stream close. - - - Removed ALTSVC frame. - - - Removed BLOCKED frame. - - - Reducing the maximum padding size to 256 octets; removing padding from - CONTINUATION frames. - - - Removed per-frame GZIP compression. - -
- -
- - Added BLOCKED frame (at risk). - - - Simplified priority scheme. - - - Added DATA per-frame GZIP compression. - -
- -
- - Changed "connection header" to "connection preface" to avoid confusion. - - - Added dependency-based stream prioritization. - - - Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. - - - Adding missing padding to PUSH_PROMISE. - - - Integrate ALTSVC frame and supporting text. - - - Dropping requirement on "deflate" Content-Encoding. - - - Improving security considerations around use of compression. - -
- -
- - Adding padding for data frames. - - - Renumbering frame types, error codes, and settings. - - - Adding INADEQUATE_SECURITY error code. - - - Updating TLS usage requirements to 1.2; forbidding TLS compression. - - - Removing extensibility for frames and settings. - - - Changing setting identifier size. - - - Removing the ability to disable flow control. - - - Changing the protocol identification token to "h2". - - - Changing the use of :authority to make it optional and to allow userinfo in non-HTTP - cases. - - - Allowing split on 0x0 for Cookie. - - - Reserved PRI method in HTTP/1.1 to avoid possible future collisions. - -
- -
- - Added cookie crumbling for more efficient header compression. - - - Added header field ordering with the value-concatenation mechanism. - -
- -
- - Marked draft for implementation. - -
- -
- - Adding definition for CONNECT method. - - - Constraining the use of push to safe, cacheable methods with no request body. - - - Changing from :host to :authority to remove any potential confusion. - - - Adding setting for header compression table size. - - - Adding settings acknowledgement. - - - Removing unnecessary and potentially problematic flags from CONTINUATION. - - - Added denial of service considerations. - -
-
- - Marking the draft ready for implementation. - - - Renumbering END_PUSH_PROMISE flag. - - - Editorial clarifications and changes. - -
- -
- - Added CONTINUATION frame for HEADERS and PUSH_PROMISE. - - - PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is - zero. - - - Push expanded to allow all safe methods without a request body. - - - Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 - hop-by-hop header fields. - - - Requiring that intermediaries not forward requests with missing or illegal routing - :-headers. - - - Clarified requirements around handling different frames after stream close, stream reset - and GOAWAY. - - - Added more specific prohibitions for sending of different frame types in various stream - states. - - - Making the last received setting value the effective value. - - - Clarified requirements on TLS version, extension and ciphers. - -
- -
- - Committed major restructuring atrocities. - - - Added reference to first header compression draft. - - - Added more formal description of frame lifecycle. - - - Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. - - - Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. - - - Added PRIORITY frame. - -
- -
- - Added continuations to frames carrying header blocks. - - - Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful - concepts, like cookies. - - - Removed "message". - - - Switched to TLS ALPN from NPN. - - - Editorial changes. - -
- -
- - Added IANA considerations section for frame types, error codes and settings. - - - Removed data frame compression. - - - Added PUSH_PROMISE. - - - Added globally applicable flags to framing. - - - Removed zlib-based header compression mechanism. - - - Updated references. - - - Clarified stream identifier reuse. - - - Removed CREDENTIALS frame and associated mechanisms. - - - Added advice against naive implementation of flow control. - - - Added session header section. - - - Restructured frame header. Removed distinction between data and control frames. - - - Altered flow control properties to include session-level limits. - - - Added note on cacheability of pushed resources and multiple tenant servers. - - - Changed protocol label form based on discussions. - -
- -
- - Changed title throughout. - - - Removed section on Incompatibilities with SPDY draft#2. - - - Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . - - - Replaced abstract and introduction. - - - Added section on starting HTTP/2.0, including upgrade mechanism. - - - Removed unused references. - - - Added flow control principles based on . - -
- -
- - Adopted as base for draft-ietf-httpbis-http2. - - - Updated authors/editors list. - - - Added status note. - -
-
- -
-
- diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go deleted file mode 100644 index 0622923ceb..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 The Go Authors. -// See https://go.googlesource.com/go/+/master/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://go.googlesource.com/go/+/master/LICENSE - -package http2 - -import ( - "flag" - "io" - "io/ioutil" - "net/http" - "os" - "reflect" - "strings" - "testing" - "time" -) - -var ( - extNet = flag.Bool("extnet", false, "do external network tests") - transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") - insecure = flag.Bool("insecure", false, "insecure TLS dials") -) - -func TestTransportExternal(t *testing.T) { - if !*extNet { - t.Skip("skipping external network test") - } - req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) - rt := &Transport{ - InsecureTLSDial: *insecure, - } - res, err := rt.RoundTrip(req) - if err != nil { - t.Fatalf("%v", err) - } - res.Write(os.Stdout) -} - -func TestTransport(t *testing.T) { - const body = "sup" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, body) - }) - defer st.Close() - - tr := &Transport{InsecureTLSDial: true} - defer tr.CloseIdleConnections() - - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - - t.Logf("Got res: %+v", res) - if g, w := res.StatusCode, 200; g != w { - t.Errorf("StatusCode = %v; want %v", g, w) - } - if g, w := res.Status, "200 OK"; g != w { - t.Errorf("Status = %q; want %q", g, w) - } - wantHeader := http.Header{ - "Content-Length": []string{"3"}, - "Content-Type": []string{"text/plain; charset=utf-8"}, - } - if !reflect.DeepEqual(res.Header, wantHeader) { - t.Errorf("res Header = %v; want %v", res.Header, wantHeader) - } - if res.Request != req { - t.Errorf("Response.Request = %p; want %p", res.Request, req) - } - if res.TLS == nil { - t.Errorf("Response.TLS = nil; want non-nil", res.TLS) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Error("Body read: %v", err) - } else if string(slurp) != body { - t.Errorf("Body = %q; want %q", slurp, body) - } - -} - -func TestTransportReusesConns(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, r.RemoteAddr) - }, optOnlyServer) - defer st.Close() - tr := &Transport{InsecureTLSDial: true} - defer tr.CloseIdleConnections() - get := func() string { - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("Body read: %v", err) - } - addr := strings.TrimSpace(string(slurp)) - if addr == "" { - t.Fatalf("didn't get an addr in response") - } - return addr - } - first := get() - second := get() - if first != second { - t.Errorf("first and second responses were on different connections: %q vs %q", first, second) - } -} - -func TestTransportAbortClosesPipes(t *testing.T) { - shutdown := make(chan struct{}) - st := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - w.(http.Flusher).Flush() - <-shutdown - }, - 
optOnlyServer, - ) - defer st.Close() - defer close(shutdown) // we must shutdown before st.Close() to avoid hanging - - done := make(chan struct{}) - requestMade := make(chan struct{}) - go func() { - defer close(done) - tr := &Transport{ - InsecureTLSDial: true, - } - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - close(requestMade) - _, err = ioutil.ReadAll(res.Body) - if err == nil { - t.Error("expected error from res.Body.Read") - } - }() - - <-requestMade - // Now force the serve loop to end, via closing the connection. - st.closeConn() - // deadlock? that's a bug. - select { - case <-done: - case <-time.After(3 * time.Second): - t.Fatal("timeout") - } -} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go deleted file mode 100644 index 07b53e3ae9..0000000000 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2014 The Go Authors. -// See https://code.google.com/p/go/source/browse/CONTRIBUTORS -// Licensed under the same terms as Go itself: -// https://code.google.com/p/go/source/browse/LICENSE - -package http2 - -import ( - "bytes" - "encoding/xml" - "flag" - "fmt" - "io" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "testing" -) - -var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") - -// The global map of sentence coverage for the http2 spec. -var defaultSpecCoverage specCoverage - -var loadSpecOnce sync.Once - -func loadSpec() { - if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { - panic(err) - } else { - defaultSpecCoverage = readSpecCov(f) - f.Close() - } -} - -// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not -// "covered" will be included in report outputed by TestSpecCoverage. 
-func covers(sec, sentences string) { - loadSpecOnce.Do(loadSpec) - defaultSpecCoverage.cover(sec, sentences) -} - -type specPart struct { - section string - sentence string -} - -func (ss specPart) Less(oo specPart) bool { - atoi := func(s string) int { - n, err := strconv.Atoi(s) - if err != nil { - panic(err) - } - return n - } - a := strings.Split(ss.section, ".") - b := strings.Split(oo.section, ".") - for len(a) > 0 { - if len(b) == 0 { - return false - } - x, y := atoi(a[0]), atoi(b[0]) - if x == y { - a, b = a[1:], b[1:] - continue - } - return x < y - } - if len(b) > 0 { - return true - } - return false -} - -type bySpecSection []specPart - -func (a bySpecSection) Len() int { return len(a) } -func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } -func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type specCoverage struct { - coverage map[specPart]bool - d *xml.Decoder -} - -func joinSection(sec []int) string { - s := fmt.Sprintf("%d", sec[0]) - for _, n := range sec[1:] { - s = fmt.Sprintf("%s.%d", s, n) - } - return s -} - -func (sc specCoverage) readSection(sec []int) { - var ( - buf = new(bytes.Buffer) - sub = 0 - ) - for { - tk, err := sc.d.Token() - if err != nil { - if err == io.EOF { - return - } - panic(err) - } - switch v := tk.(type) { - case xml.StartElement: - if skipElement(v) { - if err := sc.d.Skip(); err != nil { - panic(err) - } - if v.Name.Local == "section" { - sub++ - } - break - } - switch v.Name.Local { - case "section": - sub++ - sc.readSection(append(sec, sub)) - case "xref": - buf.Write(sc.readXRef(v)) - } - case xml.CharData: - if len(sec) == 0 { - break - } - buf.Write(v) - case xml.EndElement: - if v.Name.Local == "section" { - sc.addSentences(joinSection(sec), buf.String()) - return - } - } - } -} - -func (sc specCoverage) readXRef(se xml.StartElement) []byte { - var b []byte - for { - tk, err := sc.d.Token() - if err != nil { - panic(err) - } - switch v := tk.(type) { - case xml.CharData: - if b != nil { - panic("unexpected CharData") - } - b = []byte(string(v)) - case xml.EndElement: - if v.Name.Local != "xref" { - panic("expected ") - } - if b != nil { - return b - } - sig := attrSig(se) - switch sig { - case "target": - return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) - case "fmt-of,rel,target", "fmt-,,rel,target": - return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) - case "fmt-of,sec,target", "fmt-,,sec,target": - return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) - case "fmt-of,rel,sec,target": - return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) - default: - panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) - } - default: - panic(fmt.Sprintf("unexpected tag %q", v)) - } - } -} - -var skipAnchor = map[string]bool{ - "intro": true, - "Overview": true, -} - -var skipTitle = map[string]bool{ - "Acknowledgements": true, - "Change Log": true, - "Document Organization": true, - "Conventions and Terminology": true, -} - -func skipElement(s xml.StartElement) bool { - switch s.Name.Local { - case "artwork": - return true - case "section": - for _, attr := range s.Attr { - switch attr.Name.Local { - case "anchor": - if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { - return true - } - case "title": - if skipTitle[attr.Value] { - return true - } - } - } - } - return false -} - -func readSpecCov(r 
io.Reader) specCoverage { - sc := specCoverage{ - coverage: map[specPart]bool{}, - d: xml.NewDecoder(r)} - sc.readSection(nil) - return sc -} - -func (sc specCoverage) addSentences(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - sc.coverage[specPart{sec, s}] = false - } -} - -func (sc specCoverage) cover(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - p := specPart{sec, s} - if _, ok := sc.coverage[p]; !ok { - panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) - } - sc.coverage[specPart{sec, s}] = true - } - -} - -var whitespaceRx = regexp.MustCompile(`\s+`) - -func parseSentences(sens string) []string { - sens = strings.TrimSpace(sens) - if sens == "" { - return nil - } - ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") - for i, s := range ss { - s = strings.TrimSpace(s) - if !strings.HasSuffix(s, ".") { - s += "." - } - ss[i] = s - } - return ss -} - -func TestSpecParseSentences(t *testing.T) { - tests := []struct { - ss string - want []string - }{ - {"Sentence 1. Sentence 2.", - []string{ - "Sentence 1.", - "Sentence 2.", - }}, - {"Sentence 1. \nSentence 2.\tSentence 3.", - []string{ - "Sentence 1.", - "Sentence 2.", - "Sentence 3.", - }}, - } - - for i, tt := range tests { - got := parseSentences(tt.ss) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d: got = %q, want %q", i, got, tt.want) - } - } -} - -func TestSpecCoverage(t *testing.T) { - if !*coverSpec { - t.Skip() - } - - loadSpecOnce.Do(loadSpec) - - var ( - list []specPart - cv = defaultSpecCoverage.coverage - total = len(cv) - complete = 0 - ) - - for sp, touched := range defaultSpecCoverage.coverage { - if touched { - complete++ - } else { - list = append(list, sp) - } - } - sort.Stable(bySpecSection(list)) - - if testing.Short() && len(list) > 5 { - list = list[:5] - } - - for _, p := range list { - t.Errorf("\tSECTION %s: %s", p.section, p.sentence) - } - - t.Logf("%d/%d (%d%%) sentances covered", complete, total, (complete/total)*100) -} - -func attrSig(se xml.StartElement) string { - var names []string - for _, attr := range se.Attr { - if attr.Name.Local == "fmt" { - names = append(names, "fmt-"+attr.Value) - } else { - names = append(names, attr.Name.Local) - } - } - sort.Strings(names) - return strings.Join(names, ",") -} - -func attrValue(se xml.StartElement, attr string) string { - for _, a := range se.Attr { - if a.Name.Local == attr { - return a.Value - } - } - panic("unknown attribute " + attr) -} - -func TestSpecPartLess(t *testing.T) { - tests := []struct { - sec1, sec2 string - want bool - }{ - {"6.2.1", "6.2", false}, - {"6.2", "6.2.1", true}, - {"6.10", "6.10.1", true}, - {"6.10", "6.1.1", false}, // 10, not 1 - {"6.1", "6.1", false}, // equal, so not less - } - for _, tt := range tests { - got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) - if got != tt.want { - t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go deleted file mode 100644 index 8784187cde..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go +++ /dev/null @@ -1,2071 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f fakeMarshaler) Marshal() ([]byte, error) { - return f.b, f.err -} - -func (f fakeMarshaler) String() string { - return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) -} - -func (f fakeMarshaler) ProtoMessage() {} - -func (f fakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - wantErr error - }{ - { - name: "Marshaler that fails", - m: fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since there's an error, nothing should be written to buffer. 
- want: nil, - wantErr: errors.New("some marshal err"), - }, - { - name: "Marshaler that succeeds", - m: fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. -func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? 
-func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if strings.Index(err.Error(), "Label") < 0 { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if strings.Index(err.Error(), "{Unknown}") < 0 { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) - } -} - -// A type that implements the Marshaler interface, but is not nillable. 
-type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. 
- pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go index 6c6a7d95f2..84bbaf4335 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go @@ -75,12 +75,13 @@ func Merge(dst, src Message) { } func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) for i := 0; i < in.NumField(); i++ { f := 
in.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } - mergeAny(out.Field(i), in.Field(i)) + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } if emIn, ok := in.Addr().Interface().(extendableProto); ok { @@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) { } } -func mergeAny(out, in reflect.Value) { +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { if in.Type() == protoMessageType { if !in.IsNil() { if out.IsNil() { @@ -112,7 +116,21 @@ func mergeAny(out, in reflect.Value) { switch in.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) case reflect.Map: if in.Len() == 0 { return @@ -127,7 +145,7 @@ func mergeAny(out, in reflect.Value) { switch elemKind { case reflect.Ptr: val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key)) + mergeAny(val, in.MapIndex(key), false, nil) case reflect.Slice: val = in.MapIndex(key) val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) @@ -143,13 +161,21 @@ func mergeAny(out, in reflect.Value) { if out.IsNil() { out.Set(reflect.New(in.Elem().Type())) } - mergeAny(out.Elem(), in.Elem()) + mergeAny(out.Elem(), in.Elem(), true, nil) case reflect.Slice: if in.IsNil() { return } if in.Type().Elem().Kind() == reflect.Uint8 { // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + // Make a deep copy. // Append to []byte{} instead of []byte(nil) so that we never end up // with a nil result. @@ -167,7 +193,7 @@ func mergeAny(out, in reflect.Value) { default: for i := 0; i < n; i++ { x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i)) + mergeAny(x, in.Index(i), false, nil) out.Set(reflect.Append(out, x)) } } @@ -184,7 +210,7 @@ func mergeExtension(out, in map[int32]Extension) { eOut := Extension{desc: eIn.desc} if eIn.value != nil { v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value)) + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) eOut.value = v.Interface() } if eIn.enc != nil { diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index 9db0fb65cf..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - pb "github.com/golang/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. 
- { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go index 129792ed12..5810782fd8 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go @@ -46,6 +46,10 @@ import ( // errOverflow is returned when an integer is too large to be represented. var errOverflow = errors.New("proto: integer overflow") +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + // The fundamental decoders that interpret bytes on the wire. // Those that take integer types all return uint64 and are // therefore of type valueDecoder. @@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error { return NewBuffer(buf).Unmarshal(pb) } +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be @@ -377,6 +399,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group continue } } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } err = o.skipAndSave(st, tag, wire, base, prop.unrecField) continue } @@ -561,9 +597,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error return err } nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } y := *v - for i := 0; i < nb; i++ { + for o.index < fin { u, err := p.valDec(o) if err != nil { return err @@ -675,7 +715,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { oi := o.index // index at the end of this map entry o.index -= len(raw) // move buffer back to start of map entry - mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V if mptr.Elem().IsNil() { mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) } @@ -727,8 +767,14 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { return fmt.Errorf("proto: bad map data tag %d", raw[0]) } } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } - v.SetMapIndex(keyptr.Elem(), valptr.Elem()) + v.SetMapIndex(keyelem, valelem) return nil } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go index d1abc3328d..89d0caa826 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go @@ -228,6 +228,20 @@ func Marshal(pb Message) ([]byte, error) { return p.buf, err } +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + // Marshal takes the protocol buffer // and encodes it into the wire format, writing the result to the // Buffer. 
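Editor's aside (not part of the vendored patch): the hunk above adds Buffer.EncodeMessage, the writing counterpart of the Buffer.DecodeMessage added in decode.go earlier in this diff. Below is a minimal sketch of how the pair might be used for varint-length-delimited framing; pb.Test is the hypothetical generated type from the package documentation further down in this diff, not something this patch ships.

	package main

	import (
		"log"

		"github.com/golang/protobuf/proto"
		pb "./example.pb" // hypothetical generated package, as in the docs
	)

	func main() {
		buf := proto.NewBuffer(nil)

		// EncodeMessage writes the message prefixed by its varint-encoded length.
		if err := buf.EncodeMessage(&pb.Test{Label: proto.String("hello")}); err != nil {
			log.Fatal("encode: ", err)
		}

		// DecodeMessage reads one length prefix and unmarshals that many bytes.
		out := &pb.Test{}
		if err := proto.NewBuffer(buf.Bytes()).DecodeMessage(out); err != nil {
			log.Fatal("decode: ", err)
		}
		log.Println(out.GetLabel()) // prints "hello"
	}
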
@@ -318,7 +332,7 @@ func size_bool(p *Properties, base structPointer) int { func size_proto3_bool(p *Properties, base structPointer) int { v := *structPointer_BoolVal(base, p.field) - if !v { + if !v && !p.oneof { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte @@ -361,7 +375,7 @@ func size_int32(p *Properties, base structPointer) (n int) { func size_proto3_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -407,7 +421,7 @@ func size_uint32(p *Properties, base structPointer) (n int) { func size_proto3_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -452,7 +466,7 @@ func size_int64(p *Properties, base structPointer) (n int) { func size_proto3_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -495,7 +509,7 @@ func size_string(p *Properties, base structPointer) (n int) { func size_proto3_string(p *Properties, base structPointer) (n int) { v := *structPointer_StringVal(base, p.field) - if v == "" { + if v == "" && !p.oneof { return 0 } n += len(p.tagcode) @@ -529,7 +543,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(data) - return nil + return state.err } o.buf = append(o.buf, p.tagcode...) @@ -667,7 +681,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error func size_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) - if s == nil { + if s == nil && !p.oneof { return 0 } n += len(p.tagcode) @@ -677,7 +691,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) { func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { + if len(s) == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -1084,7 +1098,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { repeated MapFieldEntry map_field = N; */ - v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V if v.Len() == 0 { return nil } @@ -1106,6 +1120,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { for _, key := range keys { val := v.MapIndex(key) + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + keycopy.Set(key) valcopy.Set(val) @@ -1118,7 +1137,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { } func size_new_map(p *Properties, base structPointer) int { - v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) @@ -1196,6 +1215,14 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { } } + // Do oneof fields. 
+ if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) @@ -1221,6 +1248,27 @@ func size_struct(prop *StructProperties, base structPointer) (n int) { n += len(v) } + // Factor in any oneof fields. + // TODO: This could be faster and use less reflection. + if prop.oneofMarshaler != nil { + sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() + for i := 0; i < prop.stype.NumField(); i++ { + fv := sv.Field(i) + if fv.Kind() != reflect.Interface || fv.IsNil() { + continue + } + if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { + continue + } + spv := fv.Elem() // interface -> *T + sv := spv.Elem() // *T -> T + sf := sv.Type().Field(0) // StructField inside T + var prop Properties + prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) + n += prop.size(&prop, toStructPointer(spv)) + } + } + return } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go index d8673a3e97..5475c3d959 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go @@ -154,6 +154,17 @@ func equalAny(v1, v2 reflect.Value) bool { return v1.Float() == v2.Float() case reflect.Int32, reflect.Int64: return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2) case reflect.Map: if v1.Len() != v2.Len() { return false diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go deleted file mode 100644 index b322f65ab6..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - . "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// Four identical base messages. -// The init function adds extensions to some of them. -var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} - -// Two messages with non-message extensions. -var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} -var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} - -func init() { - ext1 := &pb.Ext{Data: String("Kirk")} - ext2 := &pb.Ext{Data: String("Picard")} - - // messageWithExtension1a has ext1, but never marshals it. - if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1a failed: " + err.Error()) - } - - // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. - if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1b failed: " + err.Error()) - } - buf, err := Marshal(messageWithExtension1b) - if err != nil { - panic("Marshal of 1b failed: " + err.Error()) - } - messageWithExtension1b.Reset() - if err := Unmarshal(buf, messageWithExtension1b); err != nil { - panic("Unmarshal of 1b failed: " + err.Error()) - } - - // messageWithExtension2 has ext2. 
- if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { - panic("SetExtension on 2 failed: " + err.Error()) - } - - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { - panic("SetExtension on Int32-1 failed: " + err.Error()) - } - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { - panic("SetExtension on Int32-2 failed: " + err.Error()) - } -} - -var EqualTests = []struct { - desc string - a, b Message - exp bool -}{ - {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, - {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, - {"nil vs nil", nil, nil, true}, - {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, - {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, - {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, - - {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, - {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, - {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, - {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, - - {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, - {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, - {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, - {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, - {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, - {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, - {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, - - { - "nested, different", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, - false, - }, - { - "nested, equal", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - true, - }, - - {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, - {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, - {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, - { - "repeated bytes", - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - true, - }, - - {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, - {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, - {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, - - {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, - {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, - - { - "message with group", - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - true, - }, - - { - "map same", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - true, - }, - { - "map different entry", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, - false, - }, - { - "map different key only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, - false, - }, - { - "map different value only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, - false, - }, -} - -func TestEqual(t *testing.T) { - for _, tc := range EqualTests { - if res := Equal(tc.a, tc.b); res != tc.exp { - t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go index 5f62dff247..e591ccef79 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go @@ -222,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) { } // GetExtension parses and returns the given extension of pb. -// If the extension is not present it returns ErrMissingExtension. +// If the extension is not present and has no default value it returns ErrMissingExtension. func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { if err := checkExtensionTypes(pb, extension); err != nil { return nil, err @@ -231,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er emap := pb.ExtensionMap() e, ok := emap[extension.Field] if !ok { - return nil, ErrMissingExtension + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) } + if e.value != nil { // Already decoded. Check the descriptor, though. if e.desc != extension { @@ -258,6 +261,41 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er return e.value, nil } +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. 
+ // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { o := NewBuffer(b) diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index 92d13341d6..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil 
extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go index fb139f6b74..abbf7583ed 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go @@ -30,179 +30,230 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* - Package proto converts data structures to and from the wire format of - protocol buffers. It works in concert with the Go source code generated - for .proto files by the protocol compiler. +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. - A summary of the properties of the protocol buffer interface - for a protocol buffer variable v: +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Marshal and Unmarshal are functions to encode and decode the wire format. + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. 
+ - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. - The simplest way to describe this is to see an example. - Given file test.proto, containing +The simplest way to describe this is to see an example. +Given file test.proto, containing - package example; + package example; - enum FOO { X = 17; } + enum FOO { X = 17; } - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err } + *x = FOO(value) + return nil + } - The resulting file, test.pb.go, is: + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} - package example + type isTest_Union interface { + isTest_Union() + } - import proto "github.com/golang/protobuf/proto" - import math "math" + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` 
+ } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union } - var FOO_value = map[string]int32{ - "X": 17, + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label } + return "" + } - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField } + return "" + } - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - const Default_Test_Type int32 = 77 + return 0 + } - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name } + return "" + } - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + +package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() 
string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - - To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. } + // etc. + } */ package proto @@ -211,6 +262,7 @@ import ( "fmt" "log" "reflect" + "sort" "strconv" "sync" ) @@ -385,13 +437,13 @@ func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, // DebugPrint dumps the encoded data in b in a debugging format with a header // including the string s. Used in testing but made available for general debugging. -func (o *Buffer) DebugPrint(s string, b []byte) { +func (p *Buffer) DebugPrint(s string, b []byte) { var u uint64 - obuf := o.buf - index := o.index - o.buf = b - o.index = 0 + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 depth := 0 fmt.Printf("\n--- %s ---\n", s) @@ -402,12 +454,12 @@ out: fmt.Print(" ") } - index := o.index - if index == len(o.buf) { + index := p.index + if index == len(p.buf) { break } - op, err := o.DecodeVarint() + op, err := p.DecodeVarint() if err != nil { fmt.Printf("%3d: fetching op err %v\n", index, err) break out @@ -424,7 +476,7 @@ out: case WireBytes: var r []byte - r, err = o.DecodeRawBytes(false) + r, err = p.DecodeRawBytes(false) if err != nil { break out } @@ -445,7 +497,7 @@ out: fmt.Printf("\n") case WireFixed32: - u, err = o.DecodeFixed32() + u, err = p.DecodeFixed32() if err != nil { fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) break out @@ -453,16 +505,15 @@ out: fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) case WireFixed64: - u, err = o.DecodeFixed64() + u, err = p.DecodeFixed64() if err != nil { fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - break case WireVarint: - u, err = o.DecodeVarint() + u, err = p.DecodeVarint() if err != nil { fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) break out @@ -470,30 +521,22 @@ out: fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) case WireStartGroup: - if err != nil { - fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d start\n", index, tag) depth++ case WireEndGroup: depth-- - if err != nil { - fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d end\n", index, tag) } } if depth != 0 { 
- fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth) + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) } fmt.Printf("\n") - o.buf = obuf - o.index = index + p.buf = obuf + p.index = index } // SetDefaults sets unset protocol buffer fields to their default values. @@ -668,123 +711,173 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { } ft := t.Field(fi).Type - var canHaveDefault, nestedMessage bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) } - - if !canHaveDefault { - if nestedMessage { - dm.nested = append(dm.nested, fi) - } - continue - } - - sf := scalarField{ - index: fi, - kind: ft.Elem().Kind(), - } - - // scalar fields without defaults - if !prop.HasDefault { - dm.scalars = append(dm.scalars, sf) - continue - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - log.Printf("proto: bad default bool %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - log.Printf("proto: bad default float32 %q: %v", prop.Default, err) - continue - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - log.Printf("proto: bad default float64 %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - log.Printf("proto: bad default int32 %q: %v", prop.Default, err) - continue - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - log.Printf("proto: bad default int64 %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - log.Printf("proto: bad default uint32 %q: %v", prop.Default, err) - continue - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - log.Printf("proto: bad default uint64 %q: %v", prop.Default, err) - continue - } - sf.value = x - default: - log.Printf("proto: unhandled def kind %v", ft.Elem().Kind()) - continue - } - - dm.scalars = append(dm.scalars, sf) } return dm } +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + // Map fields may have key types of non-float scalars, strings and enums. // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. -type mapKeys []reflect.Value +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 7c29bccf4b..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. 
- in := &MessageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := m[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go index c68b12525c..749919d250 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go @@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) } -// Map returns the reflect.Value for the address of a map field in the struct. -func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return structPointer_field(p, f).Addr() } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go index 48bc0fa482..e9be0fe92e 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } -// Map returns the reflect.Value for the address of a map field in the struct. -func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go index d74844ab2a..bbca4daf05 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go @@ -84,6 +84,12 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error // A valueDecoder decodes a single integer in a particular encoding. type valueDecoder func(o *Buffer) (x uint64, err error) +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. 
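Editor's aside (not part of the vendored patch): the oneofMarshaler and oneofUnmarshaler hooks declared just above, together with the OneofProperties added in the next hunk, are what let generated code expose a oneof as a single interface-typed field. A user-level sketch, reusing the hypothetical Test, Test_Number and Test_Name types from the package documentation earlier in this diff:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
		pb "./example.pb" // hypothetical generated package from the docs
	)

	func main() {
		// Setting a oneof: assign one generated wrapper type to the single
		// interface-typed field ("Union" in the documentation example).
		msg := &pb.Test{
			Label: proto.String("hello"),
			Union: &pb.Test_Number{Number: 42},
		}

		// Reading a oneof: type-switch on the interface value.
		switch u := msg.GetUnion().(type) {
		case *pb.Test_Number:
			fmt.Println("number:", u.Number)
		case *pb.Test_Name:
			fmt.Println("name:", u.Name)
		default:
			fmt.Println("union not set")
		}
	}
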
@@ -132,6 +138,21 @@ type StructProperties struct { order []int // list of struct field numbers in tag order unrecField field // field id of the XXX_unrecognized []byte field extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties } // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. @@ -156,6 +177,7 @@ type Properties struct { Packed bool // relevant for repeated primitives only Enum string // set for enum types only proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided @@ -208,6 +230,9 @@ func (p *Properties) String() string { if p.proto3 { s += ",proto3" } + if p.oneof { + s += ",oneof" + } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } @@ -284,6 +309,8 @@ func (p *Properties) Parse(s string) { p.Enum = f[5:] case f == "proto3": p.proto3 = true + case f == "oneof": + p.oneof = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string @@ -665,6 +692,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case prop.Prop[i] = p prop.order[i] = i if debug { @@ -674,7 +702,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } @@ -682,6 +710,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. 
+ for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + // build required counts // build tags reqCount := 0 diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 0000000000..37c7782092 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if 
m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index 462f8055c3..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
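The generated proto3_proto/proto3.pb.go added above pairs with isProto3Zero earlier in lib.go: proto3 scalars at their zero value are skipped entirely during encoding, so an all-zero message marshals to zero bytes. A small usage sketch, assuming the golang/protobuf import paths used throughout this diff:

// Sketch only: exercises the newly vendored proto3 generated code.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	// Proto3 scalars at their zero value are omitted from the wire, so an
	// all-zero message encodes to an empty byte slice.
	empty, _ := proto.Marshal(&pb.Message{})
	fmt.Println(len(empty)) // 0

	// Non-zero fields are encoded as usual.
	b, err := proto.Marshal(&pb.Message{Name: "notary", HeightInCm: 178})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes: %q\n", len(b), b)
}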
- -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/proto3_proto" - tpb "github.com/golang/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. - want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index db5614fd1d..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. - {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. 
- 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile deleted file mode 100644 index fc288628a7..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f test.pb.go - make test.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff test.pb.go - -restore: - cp test.pb.go.golden test.pb.go - -preserve: - cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go deleted file mode 100644 index 7172d0e969..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Verify that the compiler output for test.proto is unchanged. 
- -package testdata - -import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" -) - -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("% x", hash.Sum(nil)) -} - -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - t.Fatal(err) - } -} - -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. - newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go deleted file mode 100644 index 20a57e8af2..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go +++ /dev/null @@ -1,2397 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - MyMessage - Ext - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -// An enum, for completeness. 
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = 
RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. - RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 
`protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 
`protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const 
Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - 
return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted 
-} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) -} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
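The GoTest accessors above all follow the same proto2 pattern: optional fields are pointers, and each getter falls back to a Default_* constant when the field (or the receiver) is nil. A condensed, hypothetical illustration of that pattern; msg and defaultAnswer are invented names, not generated code.

// Illustrative sketch of the nil-check-then-default accessor pattern.
package main

import "fmt"

const defaultAnswer int32 = 32 // plays the role of Default_GoTest_F_Int32Defaulted

type msg struct {
	Answer *int32 // optional proto2 field: nil means "unset"
}

func (m *msg) GetAnswer() int32 {
	if m != nil && m.Answer != nil {
		return *m.Answer
	}
	return defaultAnswer
}

func main() {
	fmt.Println((&msg{}).GetAnswer()) // 32: unset falls back to the default
	v := int32(7)
	fmt.Println((&msg{Answer: &v}).GetAnswer()) // 7: an explicit value wins
	var m *msg
	fmt.Println(m.GetAnswer()) // 32: getters are safe on a nil receiver
}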
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} - -func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
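The comment above notes that NewMessage widens Num from int32 to int64 while staying wire compatible with OldMessage: both are varints under the same tag, so the encoded bytes do not change. A rough sketch using the proto.Buffer varint helpers; field number 2 is chosen to match Num.

// Sketch only: the same varint bytes decode identically whether the schema
// declares the field as int32 or int64.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var b proto.Buffer
	b.EncodeVarint(2<<3 | 0) // key: field 2, wire type 0 (varint)
	b.EncodeVarint(300)      // value once declared int32, later int64

	d := proto.NewBuffer(b.Bytes())
	key, _ := d.DecodeVarint()
	val, _ := d.DecodeVarint()
	fmt.Printf("tag=%d wire=%d value=%d\n", key>>3, key&7, val) // tag=2 wire=0 value=300
}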
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != 
nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil 
{ - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) -} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 
`protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - -var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: 
(*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, 
GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto deleted file mode 100644 index 411634d82f..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto +++ /dev/null @@ -1,435 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package testdata; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. - required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - 
optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. - required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. 
- -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. - repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. 
- optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map name_mapping = 1; - map msg_mapping = 2; - map byte_mapping = 3; - map str_to_str = 4; -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go index 720eac4705..81e1bdf386 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go @@ -37,11 +37,11 @@ import ( "bufio" "bytes" "encoding" + "errors" "fmt" "io" "log" "math" - "os" "reflect" "sort" "strings" @@ -246,7 +246,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. - keys := fv.MapKeys() // TODO: should we sort these for deterministic output? + keys := fv.MapKeys() sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) @@ -283,20 +283,23 @@ func writeStruct(w *textWriter, sv reflect.Value) error { if err := w.WriteByte('\n'); err != nil { return err } - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { + // nil values aren't legal, but we can avoid panicking because of them. 
+ if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { return err } - } - if err := writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err } // close struct w.unindent() @@ -315,26 +318,34 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value - switch fv.Kind() { - case reflect.Bool: - if !fv.Bool() { + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { continue } - case reflect.Int32, reflect.Int64: - if fv.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if fv.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if fv.Float() == 0 { - continue - } - case reflect.String: - if fv.String() == "" { - continue + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() } } } @@ -666,10 +677,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { pb, err := GetExtension(ep, desc) if err != nil { - if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { - return err - } - continue + return fmt.Errorf("failed getting extension: %v", err) } // Repeated extensions will appear as a slice. diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go index d1caeff5d9..8bce369204 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go @@ -174,7 +174,7 @@ func (p *textParser) advance() { } unq, err := unquoteC(p.s[1:i], rune(p.s[0])) if err != nil { - p.errorf("invalid quoted string %v", p.s[0:i+1]) + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) return } p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] @@ -385,8 +385,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet } // Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true @@ -438,7 +437,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() - reqCount := GetProperties(st).reqCount + sprops := GetProperties(st) + reqCount := sprops.reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of @@ -520,99 +520,113 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) + if err := p.consumeOptionalSeparator(); err != nil { + return err } + continue + } - dst := sv.Field(fi) + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { return err } - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue } - // For backward compatibility, permit a semicolon or comma after a field. - tok = p.next() - if tok.err != nil { - return tok.err + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) } - if tok.value != ";" && tok.value != "," { - p.back() + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } if reqCount > 0 { @@ -621,6 +635,19 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + func (p *textParser) readAny(v reflect.Value, props *Properties) error { tok := p.next() if tok.err != nil { diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index 2e51da30c9..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . "github.com/golang/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - 
}, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. - { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0"`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: 
&MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - Name: String("Ezekiel"), - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping: name_mapping:` + - `msg_mapping:>` + - `msg_mapping>` + // no colon after "value" - `byte_mapping:` - want := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - }, - ByteMapping: map[bool][]byte{ - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go deleted file mode 100644 index 03e9a98ce9..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go +++ /dev/null @@ -1,436 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. 
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
- if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - &pb.MessageList_Message{ - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; current map format is the same as a repeated struct - { - &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, - `name_mapping:`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go index 66aff7cb4a..ef2f3e86fe 100644 --- a/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ b/Godeps/_workspace/src/golang.org/x/net/context/context.go @@ -64,18 +64,21 @@ type Context interface { // // Done is provided for use in select statements: // - // // DoSomething calls DoSomethingSlow and returns as soon as - // // it returns or ctx.Done is closed. - // func DoSomething(ctx context.Context) (Result, error) { - // c := make(chan Result, 1) - // go func() { c <- DoSomethingSlow(ctx) }() - // select { - // case res := <-c: - // return res, nil - // case <-ctx.Done(): - // return nil, ctx.Err() - // } - // } + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } // // See http://blog.golang.org/pipelines for more examples of how to use // a Done channel for cancelation. @@ -202,6 +205,9 @@ type CancelFunc func() // WithCancel returns a copy of parent with a new Done channel. The returned // context's Done channel is closed when the returned cancel function is called // or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
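As a usage note, a minimal sketch (not taken from the vendored sources) of the pattern the revised doc comments above recommend: defer the returned cancel function so the context's resources are released as soon as the operation completes, whether or not the deadline fires.

```
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func slowOperation(ctx context.Context) (string, error) {
	select {
	case <-time.After(50 * time.Millisecond):
		return "done", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func slowOperationWithTimeout(ctx context.Context) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel() // releases resources even if slowOperation finishes early
	return slowOperation(ctx)
}

func main() {
	res, err := slowOperationWithTimeout(context.Background())
	fmt.Println(res, err)
}
```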
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { c := newCancelCtx(parent) propagateCancel(parent, &c) @@ -262,6 +268,19 @@ func parentCancelCtx(parent Context) (*cancelCtx, bool) { } } +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + // A canceler is a context type that can be canceled directly. The // implementations are *cancelCtx and *timerCtx. type canceler interface { @@ -316,13 +335,7 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) { c.mu.Unlock() if removeFromParent { - if p, ok := parentCancelCtx(c.Context); ok { - p.mu.Lock() - if p.children != nil { - delete(p.children, c) - } - p.mu.Unlock() - } + removeChild(c.Context, c) } } @@ -333,9 +346,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) { // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. // -// Canceling this context releases resources associated with the deadline -// timer, so code should call cancel as soon as the operations running in this -// Context complete. +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { // The current deadline is already sooner than the new one. @@ -380,7 +392,11 @@ func (c *timerCtx) String() string { } func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(removeFromParent, err) + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } c.mu.Lock() if c.timer != nil { c.timer.Stop() @@ -391,9 +407,8 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). // -// Canceling this context releases resources associated with the deadline -// timer, so code should call cancel as soon as the operations running in this -// Context complete: +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: // // func slowOperationWithTimeout(ctx context.Context) (Result, error) { // ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go deleted file mode 100644 index 82d2494a49..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. 
-type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) - o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 13, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + timeout/10): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 0000000000..48610e3627 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 0000000000..56bcbadb85 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000000..9f34888132 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. 
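A small illustration (assuming plain net/http on Go 1.5 or newer; this is not code from the diff) of the mechanism the go1.5 canceler above relies on: closing the Request.Cancel channel, added in Go 1.5, makes the transport abort the in-flight request, replacing the Transport.CancelRequest path used by the !go1.5 variant.

```
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The URL is illustrative.
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	cancel := make(chan struct{})
	req.Cancel = cancel // field introduced in Go 1.5

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(cancel) // abort the request
	}()

	_, err = http.DefaultClient.Do(req)
	fmt.Println("Do returned:", err) // typically a "request canceled" error
}
```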
+package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + result <- responseAndError{resp, err} + }() + + select { + case <-ctx.Done(): + cancel() + return nil, ctx.Err() + case r := <-result: + return r.resp, r.err + } +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc368..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. 
- ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -} diff --git a/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore b/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore new file mode 100644 index 0000000000..190f12234a --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile b/Godeps/_workspace/src/golang.org/x/net/http2/Dockerfile similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile rename to Godeps/_workspace/src/golang.org/x/net/http2/Dockerfile diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Makefile b/Godeps/_workspace/src/golang.org/x/net/http2/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/Makefile rename to Godeps/_workspace/src/golang.org/x/net/http2/Makefile diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/README b/Godeps/_workspace/src/golang.org/x/net/http2/README similarity index 68% rename from Godeps/_workspace/src/github.com/bradfitz/http2/README rename to Godeps/_workspace/src/golang.org/x/net/http2/README index 16cb516a69..360d5aa379 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/README +++ b/Godeps/_workspace/src/golang.org/x/net/http2/README @@ -10,8 +10,11 @@ Status: * The client work has just started but shares a lot of code is coming along much quicker. -Docs are at https://godoc.org/github.com/bradfitz/http2 +Docs are at https://godoc.org/golang.org/x/net/http2 Demo test server at https://http2.golang.org/ -Help & bug reports welcome. +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go b/Godeps/_workspace/src/golang.org/x/net/http2/buffer.go similarity index 89% rename from Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go rename to Godeps/_workspace/src/golang.org/x/net/http2/buffer.go index 48d7561ecf..c43954cf04 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/buffer.go @@ -19,8 +19,9 @@ type buffer struct { } var ( - errReadEmpty = errors.New("read from empty buffer") - errWriteFull = errors.New("write on full buffer") + errReadEmpty = errors.New("read from empty buffer") + errWriteClosed = errors.New("write on closed buffer") + errWriteFull = errors.New("write on full buffer") ) // Read copies bytes from the buffer into p. @@ -45,7 +46,7 @@ func (b *buffer) Len() int { // It is an error to write more data than the buffer can hold. func (b *buffer) Write(p []byte) (n int, err error) { if b.closed { - return 0, errors.New("closed") + return 0, errWriteClosed } // Slide existing data to beginning. 
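And a short sketch of how the newly vendored ctxhttp helpers above are meant to be used (the URL and timeout are illustrative): if the context expires before the server responds, the helper cancels the request and returns ctx.Err() instead of a response.

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err) // e.g. context.DeadlineExceeded
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}
```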
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors.go b/Godeps/_workspace/src/golang.org/x/net/http2/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/errors.go rename to Godeps/_workspace/src/golang.org/x/net/http2/errors.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow.go b/Godeps/_workspace/src/golang.org/x/net/http2/flow.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/flow.go rename to Godeps/_workspace/src/golang.org/x/net/http2/flow.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go b/Godeps/_workspace/src/golang.org/x/net/http2/frame.go similarity index 99% rename from Godeps/_workspace/src/github.com/bradfitz/http2/frame.go rename to Godeps/_workspace/src/golang.org/x/net/http2/frame.go index 94c5e44ae5..e8b872a19b 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/frame.go @@ -85,8 +85,8 @@ const ( // Continuation Frame FlagContinuationEndHeaders Flags = 0x4 - FlagPushPromiseEndHeaders = 0x4 - FlagPushPromisePadded = 0x8 + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 ) var flagName = map[FrameType]map[Flags]string{ diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go b/Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go similarity index 96% rename from Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go rename to Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go index 6e1a3823ca..7dc2ef90db 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go @@ -14,16 +14,20 @@ import ( "bytes" "errors" "fmt" + "os" "runtime" "strconv" "sync" ) -var DebugGoroutines = false +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type goroutineLock uint64 func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } return goroutineLock(curGoroutineID()) } diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/Makefile diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/README similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/README diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go similarity index 93% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go index de6caaa749..50480449d9 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go @@ -31,7 +31,7 @@ import ( "camlistore.org/pkg/googlestorage" 
"camlistore.org/pkg/singleflight" - "github.com/bradfitz/http2" + "golang.org/x/net/http2" ) var ( @@ -51,7 +51,7 @@ func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
   • Use Firefox Nightly or go to about:config and enable "network.http.spdy.enabled.http2draft"
   • Use Google Chrome Canary and/or go to chrome://flags/#enable-spdy4 to Enable SPDY/4 (Chrome's name for HTTP/2)
-  • See code & instructions for connecting at https://github.com/bradfitz/http2.
+  • See code & instructions for connecting at https://github.com/golang/net/tree/master/http2.
 `)
 }
@@ -73,12 +73,12 @@
 href="https://http2.github.io/">HTTP/2 demo & interop server.
 This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.
-The code is currently at github.com/bradfitz/http2
+The code is currently at href="https://golang.org/x/net/http2">golang.org/x/net/http2
 but will move to the Go standard library at some point in the future (enabled by default, without users needing to change their code).
-Contact info: bradfitz@golang.org, or file a bug.
+Contact info: bradfitz@golang.org, or href="https://golang.org/issues">file a bug.
 Handlers for testing
@@ -287,7 +287,7 @@ func newGopherTilesHandler() http.Handler {
 			return
 		}
 	}
-	io.WriteString(w, "")
+	io.WriteString(w, "")
 	fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:", xt*yt)
 	for _, ms := range []int{0, 30, 200, 1000} {
 		d := time.Duration(ms) * nanosPerMilli
@@ -305,7 +305,16 @@ func newGopherTilesHandler() http.Handler {
 	}
 	io.WriteString(w, "\n")
-	io.WriteString(w, "<< Back to Go HTTP/2 demo server")
+	io.WriteString(w, `
+
+
      << Back to Go HTTP/2 demo server`) }) } diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/launch.go similarity index 86% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/launch.go index 2eb709bdd3..746661543e 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/launch.go @@ -20,8 +20,9 @@ import ( "strings" "time" - "code.google.com/p/goauth2/oauth" - compute "code.google.com/p/google-api-go-client/compute/v1" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" ) var ( @@ -44,19 +45,18 @@ func readFile(v string) string { return strings.TrimSpace(string(slurp)) } -var config = &oauth.Config{ +var config = &oauth2.Config{ // The client-id and secret should be for an "Installed Application" when using // the CLI. Later we'll use a web application with a callback. - ClientId: readFile("client-id.dat"), + ClientID: readFile("client-id.dat"), ClientSecret: readFile("client-secret.dat"), - Scope: strings.Join([]string{ + Endpoint: google.Endpoint, + Scopes: []string{ compute.DevstorageFull_controlScope, compute.ComputeScope, "https://www.googleapis.com/auth/sqlservice", "https://www.googleapis.com/auth/sqlservice.admin", - }, " "), - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", + }, RedirectURL: "urn:ietf:wg:oauth:2.0:oob", } @@ -88,31 +88,32 @@ func main() { prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach - tr := &oauth.Transport{ - Config: config, - } - - tokenCache := oauth.CacheFile("token.dat") - token, err := tokenCache.Token() + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() if err != nil { if *writeObject != "" { log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") } - log.Printf("Error getting token from %s: %v", string(tokenCache), err) + log.Printf("Error getting token from %s: %v", tokenFileName, err) log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) fmt.Print("\nEnter auth code: ") sc := bufio.NewScanner(os.Stdin) sc.Scan() authCode := strings.TrimSpace(sc.Text()) - token, err = tr.Exchange(authCode) + token, err = config.Exchange(oauth2.NoContext, authCode) if err != nil { log.Fatalf("Error exchanging auth code for a token: %v", err) } - tokenCache.PutToken(token) + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) } - tr.Token = token - oauthClient := &http.Client{Transport: tr} + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + if *writeObject != "" { writeCloudStorageObject(oauthClient) return @@ -277,3 +278,25 @@ func writeCloudStorageObject(httpClient *http.Client) { log.Printf("Success.") os.Exit(0) } + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) 
WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.key similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.key diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.pem similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.pem diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.srl similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/rootCA.srl diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/server.crt similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/server.crt diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/server.key similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/server.key diff --git a/Godeps/_workspace/src/golang.org/x/net/http2/h2i/README.md b/Godeps/_workspace/src/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 0000000000..fb5c5efb0f --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. + +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... 
+Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/Godeps/_workspace/src/golang.org/x/net/http2/h2i/h2i.go b/Godeps/_workspace/src/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 0000000000..73efe86011 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,489 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// See https://code.google.com/p/go/source/browse/CONTRIBUTORS +// Licensed under the same terms as Go itself: +// https://code.google.com/p/go/source/browse/LICENSE + +/* +The h2i command is an interactive HTTP/2 console. + +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. 
+ complete func() []string +} + +var commands = map[string]command{ + "ping": command{run: (*h2i).cmdPing}, + "settings": command{ + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": command{run: (*h2i).cmdQuit}, + "headers": command{run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() + os.Exit(1) +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// h2i is the app's state. +type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: app.host, + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := withPort(app.host) + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(0) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\n", args...) +} + +func (app *h2i) readConsole() error { + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v\n", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.URL.Path + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go b/Godeps/_workspace/src/golang.org/x/net/http2/headermap.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go rename to Godeps/_workspace/src/golang.org/x/net/http2/headermap.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go b/Godeps/_workspace/src/golang.org/x/net/http2/hpack/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go rename to Godeps/_workspace/src/golang.org/x/net/http2/hpack/encode.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go b/Godeps/_workspace/src/golang.org/x/net/http2/hpack/hpack.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go rename to Godeps/_workspace/src/golang.org/x/net/http2/hpack/hpack.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go b/Godeps/_workspace/src/golang.org/x/net/http2/hpack/huffman.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go rename to Godeps/_workspace/src/golang.org/x/net/http2/hpack/huffman.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go b/Godeps/_workspace/src/golang.org/x/net/http2/hpack/tables.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go rename to Godeps/_workspace/src/golang.org/x/net/http2/hpack/tables.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/http2.go b/Godeps/_workspace/src/golang.org/x/net/http2/http2.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/http2.go rename to Godeps/_workspace/src/golang.org/x/net/http2/http2.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go b/Godeps/_workspace/src/golang.org/x/net/http2/pipe.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go rename to Godeps/_workspace/src/golang.org/x/net/http2/pipe.go diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/server.go b/Godeps/_workspace/src/golang.org/x/net/http2/server.go similarity index 99% rename from Godeps/_workspace/src/github.com/bradfitz/http2/server.go rename to 
Godeps/_workspace/src/golang.org/x/net/http2/server.go index 6ac4c4aa2c..99cc673cc2 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/server.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/server.go @@ -55,7 +55,7 @@ import ( "sync" "time" - "github.com/bradfitz/http2/hpack" + "golang.org/x/net/http2/hpack" ) const ( @@ -631,6 +631,9 @@ func (sc *serverConn) serve() { case wm := <-sc.wantWriteFrameCh: sc.writeFrame(wm) case <-sc.wroteFrameCh: + if sc.writingFrame != true { + panic("internal error: expected to be already writing a frame") + } sc.writingFrame = false sc.scheduleFrameWrite() case fg, ok := <-sc.readFrameCh: @@ -752,6 +755,7 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } + sc.writingFrame = true st := wm.stream if st != nil { @@ -768,7 +772,6 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { } } - sc.writingFrame = true sc.needsFrameFlush = true if endsStream(wm.write) { if st == nil { diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/transport.go b/Godeps/_workspace/src/golang.org/x/net/http2/transport.go similarity index 99% rename from Godeps/_workspace/src/github.com/bradfitz/http2/transport.go rename to Godeps/_workspace/src/golang.org/x/net/http2/transport.go index ea62188db5..73f358eefe 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/transport.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/transport.go @@ -19,7 +19,7 @@ import ( "strings" "sync" - "github.com/bradfitz/http2/hpack" + "golang.org/x/net/http2/hpack" ) type Transport struct { @@ -186,7 +186,7 @@ func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) { NextProtos: []string{NextProtoTLS}, InsecureSkipVerify: t.InsecureTLSDial, } - tconn, err := tls.Dial("tcp", host+":"+port, cfg) + tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg) if err != nil { return nil, err } diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/write.go b/Godeps/_workspace/src/golang.org/x/net/http2/write.go similarity index 99% rename from Godeps/_workspace/src/github.com/bradfitz/http2/write.go rename to Godeps/_workspace/src/golang.org/x/net/http2/write.go index 7b9bdd3a6a..02f0743de6 100644 --- a/Godeps/_workspace/src/github.com/bradfitz/http2/write.go +++ b/Godeps/_workspace/src/golang.org/x/net/http2/write.go @@ -13,7 +13,7 @@ import ( "net/http" "time" - "github.com/bradfitz/http2/hpack" + "golang.org/x/net/http2/hpack" ) // writeFramer is implemented by any type that is used to write frames. diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go b/Godeps/_workspace/src/golang.org/x/net/http2/writesched.go similarity index 100% rename from Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go rename to Godeps/_workspace/src/golang.org/x/net/http2/writesched.go diff --git a/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000..3f90b7300d --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. 
+package timeseries + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
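For intuition about the level/bucket layout described above, here is a small, self-contained sketch of the index arithmetic that mergeValue (further below) uses to place an observation into a level's circular buffer. The function and variable names here are illustrative only and are not part of the vendored package.

```go
package main

import (
	"fmt"
	"time"
)

// bucketIndex mirrors the index computation used by timeSeries.mergeValue:
// an observation taken at time t maps to one of numBuckets circular slots of
// a level whose newest bucket ends at levelEnd and whose resolution is size.
func bucketIndex(levelEnd, t time.Time, size time.Duration, numBuckets, oldest int) (int, bool) {
	index := (numBuckets - 1) - int(levelEnd.Sub(t)/size)
	if index < 0 || index >= numBuckets {
		return 0, false // too old (or in the future) for this level
	}
	return (oldest + index) % numBuckets, true
}

func main() {
	end := time.Date(2015, 10, 1, 12, 0, 0, 0, time.UTC)
	// A level with 64 buckets of 10s each covers roughly the last 10.7 minutes.
	for _, back := range []time.Duration{5 * time.Second, 95 * time.Second, 20 * time.Minute} {
		slot, ok := bucketIndex(end, end.Add(-back), 10*time.Second, 64, 0)
		fmt.Printf("%-8v ago -> slot %d, in range: %v\n", back, slot, ok)
	}
}
```

An observation older than numBuckets*size falls outside the level entirely and is skipped there, which is why the coarser-resolution levels exist.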
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
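The "approximate" caveat above comes from the proportional split performed by the extract helper further down in this file: a source bucket that only partially overlaps a requested output interval contributes its value scaled by the overlap fraction. A toy calculation of that fraction, using made-up timestamps (minTime/maxTime mirror the helpers defined at the end of this file):

```go
package main

import (
	"fmt"
	"time"
)

func minTime(a, b time.Time) time.Time {
	if a.Before(b) {
		return a
	}
	return b
}

func maxTime(a, b time.Time) time.Time {
	if a.After(b) {
		return a
	}
	return b
}

func main() {
	// Toy numbers: a 10s source bucket covering 12:00:00-12:00:10 and a
	// requested output interval 12:00:05-12:00:20. Half of the bucket
	// overlaps, so extract would credit the output with value * 0.5.
	srcStart := time.Date(2015, 10, 1, 12, 0, 0, 0, time.UTC)
	srcEnd := srcStart.Add(10 * time.Second)
	dstStart := srcStart.Add(5 * time.Second)
	dstEnd := srcStart.Add(20 * time.Second)

	overlapStart := maxTime(srcStart, dstStart)
	overlapEnd := minTime(srcEnd, dstEnd)
	fraction := overlapEnd.Sub(overlapStart).Seconds() / srcEnd.Sub(srcStart).Seconds()
	fmt.Printf("overlap fraction: %.2f\n", fraction) // 0.50
}
```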
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
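For reference, a minimal usage sketch of the exported surface defined in this file (TimeSeries, NewFloat, Float). The package lives under golang.org/x/net/internal/..., so code like this only builds from inside that tree; it is shown purely to illustrate the API and is not part of this patch.

```go
package main

import (
	"fmt"
	"time"

	// Internal to golang.org/x/net; importable only from within that tree.
	"golang.org/x/net/internal/timeseries"
)

func main() {
	// Track counts at the package's default resolutions (1s up to 16 weeks).
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	// Record a few observations; *Float implements Observable.
	for i := 0; i < 3; i++ {
		v := timeseries.Float(1)
		ts.Add(&v)
	}

	recent := ts.Recent(time.Minute).(*timeseries.Float)
	total := ts.Total().(*timeseries.Float)
	fmt.Printf("last minute: %v, total: %v\n", recent.Value(), total.Value())
}
```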
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/events.go b/Godeps/_workspace/src/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000..e66c7e3282 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/events.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, +}).Parse(eventsHTML)) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. 
+ Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl.Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
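A brief, hypothetical illustration of how an EventLog created by NewEventLog (below) interacts with the error buckets rendered above: Errorf stamps LastErrorTime, which hasRecentError consults when counting a log into the "errs<10s", "errs<1m", ... columns. The family name, title, and simulated error here are invented for the example.

```go
package main

import (
	"errors"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// A long-lived object gets its own event log (family and title are illustrative).
	el := trace.NewEventLog("example.ConnPool", "10.0.0.1:5432")
	defer el.Finish()

	el.Printf("dialing")
	err := errors.New("connection refused (simulated)")
	// Errorf stamps LastErrorTime, which moves this log into the error
	// buckets ("errs<10s", "errs<1m", ...) on the /debug/events page.
	el.Errorf("dial failed: %v", err)

	// trace's init() registers /debug/events on http.DefaultServeMux,
	// so any server using the default mux exposes the page.
	_ = http.ListenAndServe("localhost:8080", nil)
}
```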
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
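newEventLog and freeEventLog (just above and below) use a common Go freelist idiom: a buffered channel of spare objects with non-blocking send and receive, so the pool never stalls a caller. A generic sketch of the same idiom for an arbitrary type, with illustrative names:

```go
package main

import "fmt"

type buffer struct{ data []byte }

// free is a bounded pool of spare buffers. When it is full, extra buffers are
// simply dropped for the garbage collector; when it is empty, callers allocate
// fresh ones. Both paths are non-blocking.
var free = make(chan *buffer, 100)

func newBuffer() *buffer {
	select {
	case b := <-free:
		return b
	default:
		return &buffer{data: make([]byte, 0, 4096)}
	}
}

func freeBuffer(b *buffer) {
	b.data = b.data[:0] // reset before reuse, like eventLog.reset above
	select {
	case free <- b:
	default: // pool full; let b be collected
	}
}

func main() {
	b := newBuffer()
	b.data = append(b.data, "hello"...)
	freeBuffer(b)
	fmt.Println(cap(newBuffer().data)) // likely 4096: the same buffer came back
}
```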
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +const eventsHTML = ` + + + events + + + + +

      /debug/events

      + + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
      {{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
      + +{{if $.EventLogs}} +
      +

      Family: {{$.Family}}

      + +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
      WhenElapsed
      {{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
      {{$el.Stack|trimSpace}}
      {{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
      +{{end}} + + +` diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000..bb42aa5320 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go @@ -0,0 +1,356 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
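To make the power-of-two bucketing concrete, here is a standalone sketch that mirrors the unexported log2/getBucket helpers above and prints which bucket a few sample values land in; the duplication is purely illustrative and not part of the vendored file.

```go
package main

import "fmt"

const bucketCount = 38

// log2 and getBucket mirror the helpers above: values are grouped into
// buckets 0-1, 2-3, 4-7, 8-15, ... i.e. roughly by bit length.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

func getBucket(i int64) int {
	index := log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return index
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 7, 8, 100, 1023, 1024} {
		fmt.Printf("value %4d -> bucket %d\n", v, getBucket(v))
	}
}
```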
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl.Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +// Input: data +var distTmpl = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
      Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
      +
      + +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
      [{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
      +`)) diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/trace.go b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000..c87290b76e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go @@ -0,0 +1,1057 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customise its authorisation requirements. +// +// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + host, _, err := net.SplitHostPort(req.RemoteAddr) + switch { + case err != nil: // Badly formed address; fail closed. 
+ return false, false + case host == "localhost" || host == "127.0.0.1" || host == "::1": + return true, true + default: + return false, false + } +} + +func init() { + http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + Render(w, req, sensitive) + }) + http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + RenderEvents(w, req, sensitive) + }) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam, _ := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. 
This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.events = make([]event, 0, maxEventsPerTrace) + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. 
+func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + What interface{} // string or fmt.Stringer + Sensitive bool // whether this event contains sensitive information +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. + Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. 
+// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a requestz.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < cap(tr.events) { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((cap(tr.events) - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[cap(tr.events)-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.events = make([]event, 0, m) + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? 
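The New/Finish lifecycle, the context helpers (NewContext/FromContext), and the lazy logging methods above are the surface a caller touches. The following is a minimal usage sketch, separate from the vendored file itself; the handler and helper names (listHandler, doWork) are invented for illustration, and it assumes the package is imported from golang.org/x/net/trace as vendored here.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/trace"
)

// listHandler traces one request: create a Trace, log lazily, mark errors,
// and always Finish so the trace moves into the completed buckets.
func listHandler(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("example.List", req.URL.Path) // family, title
	defer tr.Finish()

	// Hand the trace to helpers via a context.
	ctx := trace.NewContext(context.Background(), tr)
	if err := doWork(ctx); err != nil {
		tr.LazyPrintf("doWork failed: %v", err)
		tr.SetError() // lands this trace in the "errors" bucket
	}
	w.Write([]byte("done\n"))
}

func doWork(ctx context.Context) error {
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf("doWork started")
	}
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	return nil
}

func main() {
	http.HandleFunc("/list", listHandler)
	// /debug/requests and /debug/events are registered by this package's init.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}

With this running, /debug/requests viewed from localhost should show an "example.List" family with its latency buckets.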
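AuthRequest, documented near the top of this file, is the only authorisation hook for the debug pages; it defaults to localhost-only. A hedged sketch of how a consuming program might widen it, assuming a trusted 10.0.0.0/8 network purely as an example (the package name below is hypothetical):

package tracedebug // hypothetical package name, for illustration only

import (
	"net"
	"net/http"

	"golang.org/x/net/trace"
)

func init() {
	_, internalNet, _ := net.ParseCIDR("10.0.0.0/8") // assumed trusted range
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		host, _, err := net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			return false, false // badly formed address; fail closed
		}
		ip := net.ParseIP(host)
		if ip == nil {
			return false, false
		}
		// Internal callers may view the pages; only loopback sees sensitive events.
		return ip.IsLoopback() || internalNet.Contains(ip), ip.IsLoopback()
	}
}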
+ +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, +}).Parse(pageHTML)) + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

      /debug/requests

      +{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
      {{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
      +{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
      +

      Family: {{$.Family}}

      + +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

      Showing {{len $.Traces}} of {{$.Total}} traces.

      +{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
      + {{if $.Active}}Active{{else}}Completed{{end}} Requests +
      WhenElapsed (s)
      {{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
      {{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
      +{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

      Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

      +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index a035125c35..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b12dd..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE deleted file mode 100644 index d02f24fd52..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md deleted file mode 100644 index a5afeca221..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. 
- - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlftech "google.golang.org/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 10aaf917d6..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine appenginevm - -// App Engine hooks. - -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "google.golang.org/appengine/urlfetch" -) - -func init() { - registerContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go deleted file mode 100644 index 8be2788556..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2_test - -import ( - "fmt" - "log" - - "golang.org/x/oauth2" -) - -func ExampleConfig() { - conf := &oauth2.Config{ - ClientID: "YOUR_CLIENT_ID", - ClientSecret: "YOUR_CLIENT_SECRET", - Scopes: []string{"SCOPE1", "SCOPE2"}, - Endpoint: oauth2.Endpoint{ - AuthURL: "https://provider.com/o/oauth2/auth", - TokenURL: "https://provider.com/o/oauth2/token", - }, - } - - // Redirect user to consent page to ask for permission - // for the scopes specified above. - url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) - fmt.Printf("Visit the URL for the auth dialog: %v", url) - - // Use the authorization code that is pushed to the redirect URL. - // NewTransportWithCode will do the handshake to retrieve - // an access token and initiate a Transport that is - // authorized and authenticated by the retrieved token. - var code string - if _, err := fmt.Scan(&code); err != nil { - log.Fatal(err) - } - tok, err := conf.Exchange(oauth2.NoContext, code) - if err != nil { - log.Fatal(err) - } - - client := conf.Client(oauth2.NoContext, tok) - client.Get("...") -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go deleted file mode 100644 index 9c816ff805..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package facebook provides constants for using OAuth2 to access Facebook. 
-package facebook - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is Facebook's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://www.facebook.com/dialog/oauth", - TokenURL: "https://graph.facebook.com/oauth/access_token", -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go deleted file mode 100644 index 82ca623dd1..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package github provides constants for using OAuth2 to access Github. -package github - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is Github's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://github.com/login/oauth/authorize", - TokenURL: "https://github.com/login/oauth/access_token", -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index 65dc347314..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// The provided context must have come from appengine.NewContext. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) 
- if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 2f9b15432f..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go deleted file mode 100644 index 78f8089853..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -// -// This client should be used when developing services -// that run on Google App Engine or Google Compute Engine -// and use "Application Default Credentials." -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource is a token source that uses -// "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine, it fetches credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - ts, err := tokenSourceFromFile(ctx, filename, scope) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return ts, nil - } - - // Second, try a well-known file. 
- filename := wellKnownFile() - _, err := os.Stat(filename) - if err == nil { - ts, err2 := tokenSourceFromFile(ctx, filename, scope) - if err2 == nil { - return ts, nil - } - err = err2 - } else if os.IsNotExist(err) { - err = nil // ignore this error - } - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil { - return AppEngineTokenSource(ctx, scope...), nil - } - - // Fourth, if we're on Google Compute Engine use the metadata server. - if metadata.OnGCE() { - return ComputeTokenSource(""), nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var d struct { - // Common fields - Type string - ClientID string `json:"client_id"` - - // User Credential fields - ClientSecret string `json:"client_secret"` - RefreshToken string `json:"refresh_token"` - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(b, &d); err != nil { - return nil, err - } - switch d.Type { - case "authorized_user": - cfg := &oauth2.Config{ - ClientID: d.ClientID, - ClientSecret: d.ClientSecret, - Scopes: append([]string{}, scopes...), // copy - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: d.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "service_account": - cfg := &jwt.Config{ - Email: d.ClientEmail, - PrivateKey: []byte(d.PrivateKey), - Scopes: append([]string{}, scopes...), // copy - TokenURL: JWTTokenURL, - } - return cfg.TokenSource(ctx), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", d.Type) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go deleted file mode 100644 index 17262802a9..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build appenginevm !appengine - -package google_test - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/appengine" - "google.golang.org/appengine/urlfetch" -) - -func ExampleDefaultClient() { - client, err := google.DefaultClient(oauth2.NoContext, - "https://www.googleapis.com/auth/devstorage.full_control") - if err != nil { - log.Fatal(err) - } - client.Get("...") -} - -func Example_webServer() { - // Your credentials should be obtained from the Google - // Developer Console (https://console.developers.google.com). - conf := &oauth2.Config{ - ClientID: "YOUR_CLIENT_ID", - ClientSecret: "YOUR_CLIENT_SECRET", - RedirectURL: "YOUR_REDIRECT_URL", - Scopes: []string{ - "https://www.googleapis.com/auth/bigquery", - "https://www.googleapis.com/auth/blogger", - }, - Endpoint: google.Endpoint, - } - // Redirect user to Google's consent page to ask for permission - // for the scopes specified above. - url := conf.AuthCodeURL("state") - fmt.Printf("Visit the URL for the auth dialog: %v", url) - - // Handle the exchange code to initiate a transport. - tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") - if err != nil { - log.Fatal(err) - } - client := conf.Client(oauth2.NoContext, tok) - client.Get("...") -} - -func ExampleJWTConfigFromJSON() { - // Your credentials should be obtained from the Google - // Developer Console (https://console.developers.google.com). - // Navigate to your project, then see the "Credentials" page - // under "APIs & Auth". - // To create a service account client, click "Create new Client ID", - // select "Service Account", and click "Create Client ID". A JSON - // key file will then be downloaded to your computer. - data, err := ioutil.ReadFile("/path/to/your-project-key.json") - if err != nil { - log.Fatal(err) - } - conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery") - if err != nil { - log.Fatal(err) - } - // Initiate an http.Client. The following GET request will be - // authorized and authenticated on the behalf of - // your service account. - client := conf.Client(oauth2.NoContext) - client.Get("...") -} - -func ExampleSDKConfig() { - // The credentials will be obtained from the first account that - // has been authorized with `gcloud auth login`. - conf, err := google.NewSDKConfig("") - if err != nil { - log.Fatal(err) - } - // Initiate an http.Client. The following GET request will be - // authorized and authenticated on the behalf of the SDK user. - client := conf.Client(oauth2.NoContext) - client.Get("...") -} - -func Example_serviceAccount() { - // Your credentials should be obtained from the Google - // Developer Console (https://console.developers.google.com). - conf := &jwt.Config{ - Email: "xxx@developer.gserviceaccount.com", - // The contents of your RSA private key or your PEM file - // that contains a private key. - // If you have a p12 file instead, you - // can use `openssl` to export the private key into a pem file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // The field only supports PEM containers with no passphrase. - // The openssl command will convert p12 keys to passphrase-less PEM containers. 
- PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), - Scopes: []string{ - "https://www.googleapis.com/auth/bigquery", - "https://www.googleapis.com/auth/blogger", - }, - TokenURL: google.JWTTokenURL, - // If you would like to impersonate a user, you can - // create a transport with a subject. The following GET - // request will be made on the behalf of user@example.com. - // Optional. - Subject: "user@example.com", - } - // Initiate an http.Client, the following GET request will be - // authorized and authenticated on the behalf of user@example.com. - client := conf.Client(oauth2.NoContext) - client.Get("...") -} - -func ExampleAppEngineTokenSource() { - var req *http.Request // from the ServeHTTP handler - ctx := appengine.NewContext(req) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"), - Base: &urlfetch.Transport{ - Context: ctx, - }, - }, - } - client.Get("...") -} - -func ExampleComputeTokenSource() { - client := &http.Client{ - Transport: &oauth2.Transport{ - // Fetch from Google Compute Engine's metadata server to retrieve - // an access token for the provided account. - // If no account is specified, "default" is used. - Source: google.ComputeTokenSource(""), - }, - } - client.Get("...") -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 2077d9866f..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -package google - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloadable from https://console.developers.google.com, -// under "APIs & Auth" > "Credentials". Download the Web application credentials in the -// JSON format and provide the contents of the file as jsonKey. 
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" page under "APIs & Auth" for your -// project at https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(jsonKey, &key); err != nil { - return nil, err - } - return &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - Scopes: scope, - TokenURL: JWTTokenURL, - }, nil -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. 
-func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go deleted file mode 100644 index 4cc01884b2..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "strings" - "testing" -) - -var webJSONKey = []byte(` -{ - "web": { - "auth_uri": "https://google.com/o/oauth2/auth", - "client_secret": "3Oknc4jS_wA2r9i", - "token_uri": "https://google.com/o/oauth2/token", - "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", - "redirect_uris": ["https://www.example.com/oauth2callback"], - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", - "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "javascript_origins": ["https://www.example.com"] - } -}`) - -var installedJSONKey = []byte(`{ - "installed": { - "client_id": "222-installed.apps.googleusercontent.com", - "redirect_uris": ["https://www.example.com/oauth2callback"] - } -}`) - -func TestConfigFromJSON(t *testing.T) { - conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2") - if err != nil { - t.Error(err) - } - if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want { - t.Errorf("ClientID = %q; want %q", got, want) - } - if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want { - t.Errorf("ClientSecret = %q; want %q", got, want) - } - if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want { - t.Errorf("RedictURL = %q; want %q", got, want) - } - if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want { - t.Errorf("Scopes = %q; want %q", got, want) - } - if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want { - t.Errorf("AuthURL = %q; want %q", got, want) - } - if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want { - 
t.Errorf("TokenURL = %q; want %q", got, want) - } -} - -func TestConfigFromJSON_Installed(t *testing.T) { - conf, err := ConfigFromJSON(installedJSONKey) - if err != nil { - t.Error(err) - } - if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want { - t.Errorf("ClientID = %q; want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index 01ba0ecb00..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := internal.ParseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. 
-var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - usr, err := user.Current() - if err == nil { - return usr.HomeDir - } - return os.Getenv("HOME") -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go deleted file mode 100644 index 79df889644..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import "testing" - -func TestSDKConfig(t *testing.T) { - sdkConfigPath = func() (string, error) { - return "testdata/gcloud", nil - } - - tests := []struct { - account string - accessToken string - err bool - }{ - {"", "bar_access_token", false}, - {"foo@example.com", "foo_access_token", false}, - {"bar@example.com", "bar_access_token", false}, - {"baz@serviceaccount.example.com", "", true}, - } - for _, tt := range tests { - c, err := NewSDKConfig(tt.account) - if got, want := err != nil, tt.err; got != want { - if !tt.err { - t.Errorf("expected no error, got error: %v", tt.err, err) - } else { - t.Errorf("expected error, got none") - } - continue - } - if err != nil { - continue - } - tok := c.initialToken - if tok == nil { - t.Errorf("expected token %q, got: nil", tt.accessToken) - continue - } - if tok.AccessToken != tt.accessToken { - t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken) - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials deleted file mode 100644 index ff5eefbd0a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials +++ /dev/null @@ -1,122 +0,0 @@ -{ - "data": [ - { - "credential": { - "_class": "OAuth2Credentials", - "_module": "oauth2client.client", - "access_token": "foo_access_token", - "client_id": "foo_client_id", - "client_secret": "foo_client_secret", - "id_token": { - "at_hash": "foo_at_hash", - "aud": "foo_aud", - "azp": "foo_azp", - "cid": "foo_cid", - "email": "foo@example.com", - "email_verified": true, - "exp": 1420573614, - "iat": 1420569714, - "id": "1337", - "iss": "accounts.google.com", - "sub": "1337", - "token_hash": "foo_token_hash", - "verified_email": true - }, - "invalid": false, - "refresh_token": "foo_refresh_token", - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "token_expiry": "2015-01-09T00:51:51Z", - "token_response": { - "access_token": "foo_access_token", - "expires_in": 3600, - "id_token": "foo_id_token", - "token_type": "Bearer" - }, - "token_uri": "https://accounts.google.com/o/oauth2/token", - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "foo@example.com", - "clientId": "foo_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email 
https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - }, - { - "credential": { - "_class": "OAuth2Credentials", - "_module": "oauth2client.client", - "access_token": "bar_access_token", - "client_id": "bar_client_id", - "client_secret": "bar_client_secret", - "id_token": { - "at_hash": "bar_at_hash", - "aud": "bar_aud", - "azp": "bar_azp", - "cid": "bar_cid", - "email": "bar@example.com", - "email_verified": true, - "exp": 1420573614, - "iat": 1420569714, - "id": "1337", - "iss": "accounts.google.com", - "sub": "1337", - "token_hash": "bar_token_hash", - "verified_email": true - }, - "invalid": false, - "refresh_token": "bar_refresh_token", - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "token_expiry": "2015-01-09T00:51:51Z", - "token_response": { - "access_token": "bar_access_token", - "expires_in": 3600, - "id_token": "bar_id_token", - "token_type": "Bearer" - }, - "token_uri": "https://accounts.google.com/o/oauth2/token", - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "bar@example.com", - "clientId": "bar_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - }, - { - "credential": { - "_class": "ServiceAccountCredentials", - "_kwargs": {}, - "_module": "oauth2client.client", - "_private_key_id": "00000000000000000000000000000000", - "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", - "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "_service_account_email": "baz@serviceaccount.example.com", - "_service_account_id": 
"baz.serviceaccount.example.com", - "_token_uri": "https://accounts.google.com/o/oauth2/token", - "_user_agent": "Cloud SDK Command Line Tool", - "access_token": null, - "assertion_type": null, - "client_id": null, - "client_secret": null, - "id_token": null, - "invalid": false, - "refresh_token": null, - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "service_account_name": "baz@serviceaccount.example.com", - "token_expiry": null, - "token_response": null, - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "baz@serviceaccount.example.com", - "clientId": "baz_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - } - ], - "file_version": 1 -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties deleted file mode 100644 index ba3ed9b7ad..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties +++ /dev/null @@ -1,2 +0,0 @@ -[core] -account = bar@example.com diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index 37571a1292..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. 
- continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go deleted file mode 100644 index 014a351e00..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "reflect" - "strings" - "testing" -) - -func TestParseINI(t *testing.T) { - tests := []struct { - ini string - want map[string]map[string]string - }{ - { - `root = toor -[foo] -bar = hop -ini = nin -`, - map[string]map[string]string{ - "": map[string]string{"root": "toor"}, - "foo": map[string]string{"bar": "hop", "ini": "nin"}, - }, - }, - { - `[empty] -[section] -empty= -`, - map[string]map[string]string{ - "": map[string]string{}, - "empty": map[string]string{}, - "section": map[string]string{"empty": ""}, - }, - }, - { - `ignore -[invalid -=stuff -;comment=true -`, - map[string]map[string]string{ - "": map[string]string{}, - }, - }, - } - for _, tt := range tests { - result, err := ParseINI(strings.NewReader(tt.ini)) - if err != nil { - t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err) - continue - } - if !reflect.DeepEqual(result, tt.want) { - t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 396b3fac82..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides encoding and decoding utilities for -// signed JWS messages. -package jws - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion - Iat int64 `json:"iat"` // the time the assertion was issued. - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). 
- Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` - - exp time.Time - iat time.Time -} - -func (c *ClaimSet) encode() (string, error) { - if c.exp.IsZero() || c.iat.IsZero() { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - c.iat = now - c.exp = now.Add(time.Hour) - } - - c.Exp = c.exp.Unix() - c.Iat = c.iat.Unix() - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64Encode(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. - return nil, errors.New("jws: invalid token received") - } - decoded, err := base64Decode(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Encode encodes a signed JWS with provided header and claim set. -func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - h := sha256.New() - h.Write([]byte(ss)) - b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) - if err != nil { - return "", err - } - sig := base64Encode(b) - return fmt.Sprintf("%s.%s", ss, sig), nil -} - -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. 
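For reference, a minimal sketch of how the jws package removed above was used: build a ClaimSet and Header, sign them with an RSA key via Encode, and read the claims back with Decode (which parses but does not verify the signature). The key and claim values here are throwaway placeholders:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Throwaway signing key for illustration only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	claims := &jws.ClaimSet{
		Iss:   "service-account@example.com",
		Scope: "https://www.googleapis.com/auth/devstorage.read_only",
		Aud:   "https://accounts.google.com/o/oauth2/token",
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

	// Encode produces header.claims.signature, base64url-encoded and RS256-signed.
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}

	// Decode only parses the claim set back out of the payload.
	got, err := jws.Decode(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("issuer:", got.Iss, "expires at (unix):", got.Exp)
}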
-func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 2: - s += "==" - case 3: - s += "=" - } - return base64.URLEncoding.DecodeString(s) -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go deleted file mode 100644 index 6d618836ea..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jwt_test - -import ( - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -func ExampleJWTConfig() { - conf := &jwt.Config{ - Email: "xxx@developer.com", - // The contents of your RSA private key or your PEM file - // that contains a private key. - // If you have a p12 file instead, you - // can use `openssl` to export the private key into a pem file. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - // It only supports PEM containers with no passphrase. - PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), - Subject: "user@example.com", - TokenURL: "https://provider.com/o/oauth2/token", - } - // Initiate an http.Client, the following GET request will be - // authorized and authenticated on the behalf of user@example.com. - client := conf.Client(oauth2.NoContext) - client.Get("...") -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index 205d23ed43..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. 
- TokenURL string -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. - claimSet.Prn = subject - } - payload, err := jws.Encode(defaultHeader, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go deleted file mode 100644 index da922c3d00..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
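The jwt.Config removed above implemented the two-legged flow end to end: TokenSource signs a JWS assertion with the configured private key, POSTs it to TokenURL with the jwt-bearer grant type, and parses the returned token. A rough usage sketch, with placeholder key file, email, and scope:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// Placeholder: a real caller would load its service-account PEM key.
	pem, err := ioutil.ReadFile("service-account-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "xxx@developer.gserviceaccount.com", // placeholder client email
		PrivateKey: pem,
		Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}

	// Token() performs the signed-assertion request and returns the result.
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType, "expires:", tok.Expiry)
}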
- -package jwt - -import ( - "net/http" - "net/http/httptest" - "testing" - - "golang.org/x/oauth2" -) - -var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE -DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY -fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK -1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr -k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 -/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt -3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn -2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 -nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK -6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf -5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e -DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 -M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g -z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y -1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK -J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U -f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx -QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA -cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr -Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw -5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg -KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 -OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd -mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ -5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== ------END RSA PRIVATE KEY-----`) - -func TestJWTFetch_JSONResponse(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{ - "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", - "scope": "user", - "token_type": "bearer", - "expires_in": 3600 - }`)) - })) - defer ts.Close() - - conf := &Config{ - Email: "aaa@xxx.com", - PrivateKey: dummyPrivateKey, - TokenURL: ts.URL, - } - tok, err := conf.TokenSource(oauth2.NoContext).Token() - if err != nil { - t.Fatal(err) - } - if !tok.Valid() { - t.Errorf("Token invalid") - } - if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { - t.Errorf("Unexpected access token, %#v", tok.AccessToken) - } - if tok.TokenType != "bearer" { - t.Errorf("Unexpected token type, %#v", tok.TokenType) - } - if tok.Expiry.IsZero() { - t.Errorf("Unexpected token expiry, %#v", tok.Expiry) - } - scope := tok.Extra("scope") - if scope != "user" { - t.Errorf("Unexpected value for scope: %v", scope) - } -} - -func TestJWTFetch_BadResponse(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) - })) - defer ts.Close() - - conf := &Config{ - Email: "aaa@xxx.com", - PrivateKey: dummyPrivateKey, - TokenURL: ts.URL, - } - tok, err := conf.TokenSource(oauth2.NoContext).Token() - if err != nil { - t.Fatal(err) - } - if tok == nil { - t.Fatalf("token is nil") - } - if tok.Valid() { - t.Errorf("token is valid. 
want invalid.") - } - if tok.AccessToken != "" { - t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken) - } - if want := "bearer"; tok.TokenType != want { - t.Errorf("TokenType = %q; want %q", tok.TokenType, want) - } - scope := tok.Extra("scope") - if want := "user"; scope != want { - t.Errorf("token scope = %q; want %q", scope, want) - } -} - -func TestJWTFetch_BadResponseType(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) - })) - defer ts.Close() - conf := &Config{ - Email: "aaa@xxx.com", - PrivateKey: dummyPrivateKey, - TokenURL: ts.URL, - } - tok, err := conf.TokenSource(oauth2.NoContext).Token() - if err == nil { - t.Error("got a token; expected error") - if tok.AccessToken != "" { - t.Errorf("Unexpected access token, %#v.", tok.AccessToken) - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go deleted file mode 100644 index d93fded6ad..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package linkedin provides constants for using OAuth2 to access LinkedIn. -package linkedin - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is LinkedIn's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", - TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index ece967a69b..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,523 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. -// It can additionally grant authorization with Bearer JWT. -package oauth2 - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -var NoContext = context.TODO() - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. 
- Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": condVal(c.RedirectURL), - "scope": condVal(strings.Join(c.Scopes, " ")), - "state": condVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. 
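As a quick illustration of the resource-owner password grant described in the comment above (endpoints and credentials are placeholders; per the RFC this grant is only for highly trusted first-party clients):

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Scopes:       []string{"scope1", "scope2"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",  // placeholder
			TokenURL: "https://provider.example.com/token", // placeholder
		},
	}

	// Exchanges the username/password pair directly for a token.
	tok, err := conf.PasswordCredentialsToken(oauth2.NoContext, "user1", "password1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token valid:", tok.Valid())
}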
-func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": condVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": condVal(c.RedirectURL), - "scope": condVal(strings.Join(c.Scopes, " ")), - }) -} - -// contextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. -type contextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []contextClientFunc - -func registerContextClientFunc(fn contextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func contextClient(ctx context.Context) (*http.Client, error) { - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - return http.DefaultClient, nil -} - -func contextTransport(ctx context.Context) http.RoundTripper { - hc, err := contextClient(ctx) - if err != nil { - // This is a rare error case (somebody using nil on App Engine), - // so I'd rather not everybody do an error check on this Client - // method. They can get the error that they're doing it wrong - // later, at client.Get/PostForm time. - return errorTransport{err} - } - return hc.Transport -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. 
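Putting the pieces above together, a sketch of the ordinary three-legged flow as the removed package exposed it, here using the linkedin.Endpoint constants from earlier in this diff (client credentials, redirect URL, scope, and the callback code are placeholders):

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/linkedin"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		RedirectURL:  "https://example.com/callback", // placeholder
		Scopes:       []string{"r_basicprofile"},     // placeholder scope
		Endpoint:     linkedin.Endpoint,              // provider constants from the subpackage above
	}

	// Step 1: send the user to the provider's consent page. The state value
	// must be validated on the redirect callback to prevent CSRF.
	url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Println("visit:", url)

	// Step 2: after the redirect, exchange ?code=... for a token.
	code := "code-from-callback" // placeholder
	tok, err := conf.Exchange(oauth2.NoContext, code)
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: wrap the token in an auto-refreshing *http.Client.
	client := conf.Client(oauth2.NoContext, tok)
	_ = client
}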
-func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - hc, err := contextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", c.ClientID) - bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL) - if bustedAuth && c.ClientSecret != "" { - v.Set("client_secret", c.ClientSecret) - } - req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(c.ClientID, c.ClientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. 
- if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -func condVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://www.googleapis.com/", - "https://github.com/", - "https://api.instagram.com/", - "https://www.douban.com/", - "https://api.dropbox.com/", - "https://api.soundcloud.com/", - "https://www.linkedin.com/", - "https://api.twitch.tv/", - "https://oauth.vk.com/", - "https://api.odnoklassniki.ru/", - "https://connect.stripe.com/", - "https://api.pushbullet.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://www.strava.com/oauth/", -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient contextKey - -// contextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a contextKey, being unexported. -type contextKey struct{} - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. 
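A small sketch of the TokenSource/ReuseTokenSource/NewClient composition documented above, with a hypothetical custom source standing in for, say, an on-disk token cache:

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/oauth2"
)

// fileTokenSource is a stand-in for any custom TokenSource, e.g. one that
// loads a cached token and mints a new one when the cache is stale.
type fileTokenSource struct{}

func (fileTokenSource) Token() (*oauth2.Token, error) {
	// A real implementation would fetch or load a token here.
	return &oauth2.Token{
		AccessToken: "cached-or-freshly-minted-token",
		Expiry:      time.Now().Add(time.Hour),
	}, nil
}

func main() {
	// ReuseTokenSource caches the token in memory and only calls the wrapped
	// source again once the cached token has expired.
	src := oauth2.ReuseTokenSource(nil, fileTokenSource{})

	tok, err := src.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token valid until:", tok.Expiry)

	// NewClient attaches the source to an *http.Client so every request
	// carries a current Authorization header.
	client := oauth2.NewClient(oauth2.NoContext, src)
	_ = client
}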
-func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := contextClient(ctx) - if err != nil { - return &http.Client{Transport: errorTransport{err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: contextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go deleted file mode 100644 index 7b7e3c130a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
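The HTTPClient context key documented above was the hook for supplying a custom transport; a sketch of how a caller overrode the default client (endpoints and authorization code are placeholders, so the exchange is expected to fail at runtime):

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	// A custom client, e.g. with its own TLS or proxy settings.
	hc := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12}},
	}

	// Associating the client with the context makes the token exchange below
	// (and any later refreshes) go through hc instead of http.DefaultClient.
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",  // placeholder
			TokenURL: "https://provider.example.com/token", // placeholder
		},
	}
	if _, err := conf.Exchange(ctx, "authorization-code"); err != nil {
		log.Println("exchange failed (expected with placeholder values):", err)
	}
}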
- -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "strconv" - "testing" - "time" - - "golang.org/x/net/context" -) - -type mockTransport struct { - rt func(req *http.Request) (resp *http.Response, err error) -} - -func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - return t.rt(req) -} - -type mockCache struct { - token *Token - readErr error -} - -func (c *mockCache) ReadToken() (*Token, error) { - return c.token, c.readErr -} - -func (c *mockCache) WriteToken(*Token) { - // do nothing -} - -func newConf(url string) *Config { - return &Config{ - ClientID: "CLIENT_ID", - ClientSecret: "CLIENT_SECRET", - RedirectURL: "REDIRECT_URL", - Scopes: []string{"scope1", "scope2"}, - Endpoint: Endpoint{ - AuthURL: url + "/auth", - TokenURL: url + "/token", - }, - } -} - -func TestAuthCodeURL(t *testing.T) { - conf := newConf("server") - url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) - if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" { - t.Errorf("Auth code URL doesn't match the expected, found: %v", url) - } -} - -func TestAuthCodeURL_CustomParam(t *testing.T) { - conf := newConf("server") - param := SetAuthURLParam("foo", "bar") - url := conf.AuthCodeURL("baz", param) - if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" { - t.Errorf("Auth code URL doesn't match the expected, found: %v", url) - } -} - -func TestAuthCodeURL_Optional(t *testing.T) { - conf := &Config{ - ClientID: "CLIENT_ID", - Endpoint: Endpoint{ - AuthURL: "/auth-url", - TokenURL: "/token-url", - }, - } - url := conf.AuthCodeURL("") - if url != "/auth-url?client_id=CLIENT_ID&response_type=code" { - t.Fatalf("Auth code URL doesn't match the expected, found: %v", url) - } -} - -func TestExchangeRequest(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.String() != "/token" { - t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) - } - headerAuth := r.Header.Get("Authorization") - if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { - t.Errorf("Unexpected authorization header, %v is found.", headerAuth) - } - headerContentType := r.Header.Get("Content-Type") - if headerContentType != "application/x-www-form-urlencoded" { - t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("Failed reading request body: %s.", err) - } - if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { - t.Errorf("Unexpected exchange payload, %v is found.", string(body)) - } - w.Header().Set("Content-Type", "application/x-www-form-urlencoded") - w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) - })) - defer ts.Close() - conf := newConf(ts.URL) - tok, err := conf.Exchange(NoContext, "exchange-code") - if err != nil { - t.Error(err) - } - if !tok.Valid() { - t.Fatalf("Token invalid. 
Got: %#v", tok) - } - if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { - t.Errorf("Unexpected access token, %#v.", tok.AccessToken) - } - if tok.TokenType != "bearer" { - t.Errorf("Unexpected token type, %#v.", tok.TokenType) - } - scope := tok.Extra("scope") - if scope != "user" { - t.Errorf("Unexpected value for scope: %v", scope) - } -} - -func TestExchangeRequest_JSONResponse(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.String() != "/token" { - t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) - } - headerAuth := r.Header.Get("Authorization") - if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { - t.Errorf("Unexpected authorization header, %v is found.", headerAuth) - } - headerContentType := r.Header.Get("Content-Type") - if headerContentType != "application/x-www-form-urlencoded" { - t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("Failed reading request body: %s.", err) - } - if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { - t.Errorf("Unexpected exchange payload, %v is found.", string(body)) - } - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) - })) - defer ts.Close() - conf := newConf(ts.URL) - tok, err := conf.Exchange(NoContext, "exchange-code") - if err != nil { - t.Error(err) - } - if !tok.Valid() { - t.Fatalf("Token invalid. Got: %#v", tok) - } - if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { - t.Errorf("Unexpected access token, %#v.", tok.AccessToken) - } - if tok.TokenType != "bearer" { - t.Errorf("Unexpected token type, %#v.", tok.TokenType) - } - scope := tok.Extra("scope") - if scope != "user" { - t.Errorf("Unexpected value for scope: %v", scope) - } -} - -const day = 24 * time.Hour - -func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { - seconds := int32(day.Seconds()) - jsonNumberType := reflect.TypeOf(json.Number("0")) - for _, c := range []struct { - expires string - expect error - }{ - {fmt.Sprintf(`"expires_in": %d`, seconds), nil}, - {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case - {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case - {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type - {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type - {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value - } { - testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect) - } -} - -func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) - })) - defer ts.Close() - conf := newConf(ts.URL) - t1 := time.Now().Add(day) - tok, err := conf.Exchange(NoContext, "exchange-code") - t2 := time.Now().Add(day) - // Do a fmt.Sprint comparison so either side can be - // nil. 
fmt.Sprint just stringifies them to "", and no - // non-nil expected error ever stringifies as "", so this - // isn't terribly disgusting. We do this because Go 1.4 and - // Go 1.5 return a different deep value for - // json.UnmarshalTypeError. In Go 1.5, the - // json.UnmarshalTypeError contains a new field with a new - // non-zero value. Rather than ignore it here with reflect or - // add new files and +build tags, just look at the strings. - if fmt.Sprint(err) != fmt.Sprint(expect) { - t.Errorf("Error = %v; want %v", err, expect) - } - if err != nil { - return - } - if !tok.Valid() { - t.Fatalf("Token invalid. Got: %#v", tok) - } - expiry := tok.Expiry - if expiry.Before(t1) || expiry.After(t2) { - t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2) - } -} - -func TestExchangeRequest_BadResponse(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) - })) - defer ts.Close() - conf := newConf(ts.URL) - tok, err := conf.Exchange(NoContext, "code") - if err != nil { - t.Fatal(err) - } - if tok.AccessToken != "" { - t.Errorf("Unexpected access token, %#v.", tok.AccessToken) - } -} - -func TestExchangeRequest_BadResponseType(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) - })) - defer ts.Close() - conf := newConf(ts.URL) - _, err := conf.Exchange(NoContext, "exchange-code") - if err == nil { - t.Error("expected error from invalid access_token type") - } -} - -func TestExchangeRequest_NonBasicAuth(t *testing.T) { - tr := &mockTransport{ - rt: func(r *http.Request) (w *http.Response, err error) { - headerAuth := r.Header.Get("Authorization") - if headerAuth != "" { - t.Errorf("Unexpected authorization header, %v is found.", headerAuth) - } - return nil, errors.New("no response") - }, - } - c := &http.Client{Transport: tr} - conf := &Config{ - ClientID: "CLIENT_ID", - Endpoint: Endpoint{ - AuthURL: "https://accounts.google.com/auth", - TokenURL: "https://accounts.google.com/token", - }, - } - - ctx := context.WithValue(context.Background(), HTTPClient, c) - conf.Exchange(ctx, "code") -} - -func TestPasswordCredentialsTokenRequest(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - expected := "/token" - if r.URL.String() != expected { - t.Errorf("URL = %q; want %q", r.URL, expected) - } - headerAuth := r.Header.Get("Authorization") - expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" - if headerAuth != expected { - t.Errorf("Authorization header = %q; want %q", headerAuth, expected) - } - headerContentType := r.Header.Get("Content-Type") - expected = "application/x-www-form-urlencoded" - if headerContentType != expected { - t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("Failed reading request body: %s.", err) - } - expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" - if string(body) != expected { - t.Errorf("res.Body = %q; want %q", string(body), expected) - } - w.Header().Set("Content-Type", "application/x-www-form-urlencoded") - 
w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) - })) - defer ts.Close() - conf := newConf(ts.URL) - tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1") - if err != nil { - t.Error(err) - } - if !tok.Valid() { - t.Fatalf("Token invalid. Got: %#v", tok) - } - expected := "90d64460d14870c08c81352a05dedd3465940a7c" - if tok.AccessToken != expected { - t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) - } - expected = "bearer" - if tok.TokenType != expected { - t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) - } -} - -func TestTokenRefreshRequest(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.String() == "/somethingelse" { - return - } - if r.URL.String() != "/token" { - t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) - } - headerContentType := r.Header.Get("Content-Type") - if headerContentType != "application/x-www-form-urlencoded" { - t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) - } - body, _ := ioutil.ReadAll(r.Body) - if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { - t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) - } - })) - defer ts.Close() - conf := newConf(ts.URL) - c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"}) - c.Get(ts.URL + "/somethingelse") -} - -func TestFetchWithNoRefreshToken(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.String() == "/somethingelse" { - return - } - if r.URL.String() != "/token" { - t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) - } - headerContentType := r.Header.Get("Content-Type") - if headerContentType != "application/x-www-form-urlencoded" { - t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) - } - body, _ := ioutil.ReadAll(r.Body) - if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { - t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) - } - })) - defer ts.Close() - conf := newConf(ts.URL) - c := conf.Client(NoContext, nil) - _, err := c.Get(ts.URL + "/somethingelse") - if err == nil { - t.Errorf("Fetch should return an error if no refresh token is set") - } -} - -func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) - return - })) - defer ts.Close() - conf := newConf(ts.URL) - tkr := tokenRefresher{ - conf: conf, - ctx: NoContext, - refreshToken: "OLD REFRESH TOKEN", - } - tk, err := tkr.Token() - if err != nil { - t.Errorf("Unexpected refreshToken error returned: %v", err) - return - } - if tk.RefreshToken != tkr.refreshToken { - t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken) - } -} - -func TestConfigClientWithToken(t *testing.T) { - tok := &Token{ - AccessToken: "abc123", - } - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { - t.Errorf("Authorization header = %q; want %q", got, want) - 
} - return - })) - defer ts.Close() - conf := newConf(ts.URL) - - c := conf.Client(NoContext, tok) - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Error(err) - } - _, err = c.Do(req) - if err != nil { - t.Error(err) - } -} - -func Test_providerAuthHeaderWorks(t *testing.T) { - for _, p := range brokenAuthHeaderProviders { - if providerAuthHeaderWorks(p) { - t.Errorf("URL: %s not found in list", p) - } - p := fmt.Sprintf("%ssomesuffix", p) - if providerAuthHeaderWorks(p) { - t.Errorf("URL: %s not found in list", p) - } - } - p := "https://api.not-in-the-list-example.com/" - if !providerAuthHeaderWorks(p) { - t.Errorf("URL: %s found in list", p) - } - -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go deleted file mode 100644 index f0b66f97de..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. -package odnoklassniki - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", - TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go deleted file mode 100644 index a99366b6e2..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package paypal provides constants for using OAuth2 to access PayPal. -package paypal - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", - TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", -} - -// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. -var SandboxEndpoint = oauth2.Endpoint{ - AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", - TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice", -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go deleted file mode 100644 index 9852ceb4a6..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "time" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. 
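The odnoklassniki and paypal helper packages deleted above export nothing but oauth2.Endpoint values. Any caller that still needs one of these endpoints after the vendor removal can declare it locally; a minimal sketch, assuming the upstream golang.org/x/oauth2 package is available on the GOPATH (the URLs are copied from the deleted paypal file, and the client credentials are placeholders):

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

// paypalSandbox mirrors SandboxEndpoint from the removed vendored
// paypal package; the URLs come straight from the deleted file.
var paypalSandbox = oauth2.Endpoint{
	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
}

func main() {
	// CLIENT_ID / CLIENT_SECRET are placeholders, not real credentials.
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Endpoint:     paypalSandbox,
	}
	fmt.Println(conf.AuthCodeURL("state"))
}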
-const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if vals, ok := t.raw.(url.Values); ok { - // TODO(jbd): Cast numeric values to int64 or float64. - return vals.Get(key) - } - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - return nil -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go deleted file mode 100644 index 739eeb2a20..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
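The token.go removed here treats a token as expired expiryDelta (ten seconds) before its stated Expiry, and Valid() additionally requires a non-empty AccessToken. A self-contained sketch of that check, using local stand-in names rather than the library's types:

package main

import (
	"fmt"
	"time"
)

// expiryDelta and token mirror the removed oauth2.Token logic in a
// self-contained form; the names below are local stand-ins.
const expiryDelta = 10 * time.Second

type token struct {
	AccessToken string
	Expiry      time.Time
}

// expired reports whether the expiry, minus the safety delta, is in the
// past; a zero Expiry means the token never expires.
func (t token) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}
	return t.Expiry.Add(-expiryDelta).Before(time.Now())
}

// valid mirrors Token.Valid: a non-empty access token that is not expired.
func (t token) valid() bool {
	return t.AccessToken != "" && !t.expired()
}

func main() {
	tok := token{AccessToken: "abc", Expiry: time.Now().Add(5 * time.Second)}
	fmt.Println(tok.valid()) // false: already inside the 10s expiry delta
}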
- -package oauth2 - -import ( - "testing" - "time" -) - -func TestTokenExtra(t *testing.T) { - type testCase struct { - key string - val interface{} - want interface{} - } - const key = "extra-key" - cases := []testCase{ - {key: key, val: "abc", want: "abc"}, - {key: key, val: 123, want: 123}, - {key: key, val: "", want: ""}, - {key: "other-key", val: "def", want: nil}, - } - for _, tc := range cases { - extra := make(map[string]interface{}) - extra[tc.key] = tc.val - tok := &Token{raw: extra} - if got, want := tok.Extra(key), tc.want; got != want { - t.Errorf("Extra(%q) = %q; want %q", key, got, want) - } - } -} - -func TestTokenExpiry(t *testing.T) { - now := time.Now() - cases := []struct { - name string - tok *Token - want bool - }{ - {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, - {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, - } - for _, tc := range cases { - if got, want := tc.tok.expired(), tc.want; got != want { - t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go deleted file mode 100644 index 10339a0be7..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. 
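The Transport in the transport.go being deleted wraps a base http.RoundTripper, obtains a token from its Source, clones the request (RoundTrippers must not modify the caller's request), and sets the Authorization header before delegating. A stripped-down sketch of the same pattern with a fixed token instead of a TokenSource; bearerTransport is a local stand-in, not the library type:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// bearerTransport injects a fixed bearer token; the removed Transport
// additionally pulls the token from a TokenSource and tracks in-flight
// requests for cancellation.
type bearerTransport struct {
	token string
	base  http.RoundTripper
}

func (t *bearerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Shallow-copy the request and deep-copy the header, as the deleted
	// cloneRequest helper does, so the caller's request is left untouched.
	r2 := new(http.Request)
	*r2 = *req
	r2.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		r2.Header[k] = append([]string(nil), v...)
	}
	r2.Header.Set("Authorization", "Bearer "+t.token)
	return t.base.RoundTrip(r2)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.Header.Get("Authorization"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: &bearerTransport{token: "abc", base: http.DefaultTransport}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(b)) // "Bearer abc"
}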
-func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} - -type errorTransport struct{ err error } - -func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.err -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go deleted file mode 100644 index efb8232ac4..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package oauth2 - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" -) - -type tokenSource struct{ token *Token } - -func (t *tokenSource) Token() (*Token, error) { - return t.token, nil -} - -func TestTransportTokenSource(t *testing.T) { - ts := &tokenSource{ - token: &Token{ - AccessToken: "abc", - }, - } - tr := &Transport{ - Source: ts, - } - server := newMockServer(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Authorization") != "Bearer abc" { - t.Errorf("Transport doesn't set the Authorization header from the fetched token") - } - }) - defer server.Close() - client := http.Client{Transport: tr} - client.Get(server.URL) -} - -func TestTokenValidNoAccessToken(t *testing.T) { - token := &Token{} - if token.Valid() { - t.Errorf("Token should not be valid with no access token") - } -} - -func TestExpiredWithExpiry(t *testing.T) { - token := &Token{ - Expiry: time.Now().Add(-5 * time.Hour), - } - if token.Valid() { - t.Errorf("Token should not be valid if it expired in the past") - } -} - -func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(handler)) -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go deleted file mode 100644 index 00e929357a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
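transport.go also ships an onEOFReader so the Transport can run a cleanup callback exactly once, when the response body reaches io.EOF or is closed. A small usage sketch of that pattern, reproduced from the deleted helper with an illustrative main:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// onEOFReader mirrors the helper in the deleted transport.go: fn runs
// exactly once, on EOF or on Close, whichever happens first.
type onEOFReader struct {
	rc io.ReadCloser
	fn func()
}

func (r *onEOFReader) Read(p []byte) (int, error) {
	n, err := r.rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return n, err
}

func (r *onEOFReader) Close() error {
	err := r.rc.Close()
	r.runFunc()
	return err
}

func (r *onEOFReader) runFunc() {
	if fn := r.fn; fn != nil {
		fn()
		r.fn = nil // ensure the callback never fires twice
	}
}

func main() {
	body := ioutil.NopCloser(strings.NewReader("payload"))
	wrapped := &onEOFReader{rc: body, fn: func() { fmt.Println("cleanup ran") }}
	io.Copy(ioutil.Discard, wrapped) // prints "cleanup ran" once, at EOF
	wrapped.Close()                  // no second print
}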
- -// Package vk provides constants for using OAuth2 to access VK.com. -package vk - -import ( - "golang.org/x/oauth2" -) - -// Endpoint is VK's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://oauth.vk.com/authorize", - TokenURL: "https://oauth.vk.com/access_token", -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go deleted file mode 100644 index c979f43906..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.3 - -package metadata - -import ( - "net" - "time" -) - -// This is a workaround for https://github.com/golang/oauth2/issues/70, where -// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of -// Jan 2015 still runs). -// -// TODO(bradfitz,jbd,adg): remove this once App Engine supports Go -// 1.3+. -func init() { - go13Dialer = func() *net.Dialer { - return &net.Dialer{ - Timeout: 750 * time.Millisecond, - KeepAlive: 30 * time.Second, - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go deleted file mode 100644 index 7753a05b67..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. 
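The go13.go file above exists only to set net.Dialer.KeepAlive behind a "// +build go1.3" constraint, because the field is missing on Go 1.2 (the App Engine case called out in its comment). On Go 1.3 and later the same dialer can be configured unconditionally; a minimal sketch using the timeout values from the deleted files:

package main

import (
	"net"
	"net/http"
	"time"
)

func main() {
	// Mirrors the settings from the deleted go13.go/metadata.go: a short
	// dial timeout for the link-local metadata server plus TCP keep-alives.
	d := &net.Dialer{
		Timeout:   750 * time.Millisecond,
		KeepAlive: 30 * time.Second,
	}
	client := &http.Client{
		Transport: &http.Transport{
			Dial:                  d.Dial,
			ResponseHeaderTimeout: 750 * time.Millisecond,
		},
	}
	_ = client
}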
-package metadata - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" - "sync" - "time" - - "google.golang.org/cloud/internal" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var metaClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: dialer().Dial, - ResponseHeaderTimeout: 750 * time.Millisecond, - }, - }, -} - -// go13Dialer is nil until we're using Go 1.3+. -// This is a workaround for https://github.com/golang/oauth2/issues/70, where -// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of -// Jan 2015 still runs). -// -// TODO(bradfitz,jbd,adg,dsymonds): remove this once App Engine supports Go -// 1.3+ and go-app-builder also supports 1.3+, or when Go 1.2 is no longer an -// option on App Engine. -var go13Dialer func() *net.Dialer - -func dialer() *net.Dialer { - if fn := go13Dialer; fn != nil { - return fn() - } - return &net.Dialer{ - Timeout: 750 * time.Millisecond, - } -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://metadata/computeMetadata/v1/". -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - url := "http://169.254.169.254/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - res, err := metaClient.Do(req) - if err != nil { - return "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", err - } - return string(all), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = getTrimmed(c.k) - } else { - v, err = Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var onGCE struct { - sync.Mutex - set bool - v bool -} - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - defer onGCE.Unlock() - onGCE.Lock() - if onGCE.set { - return onGCE.v - } - onGCE.set = true - - // We use the DNS name of the metadata service here instead of the IP address - // because we expect that to fail faster in the not-on-GCE case. 
- res, err := metaClient.Get("http://metadata.google.internal") - if err != nil { - return false - } - onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" - return onGCE.v -} - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will probably be of -// the form "INSTANCENAME.c.PROJECT.internal" but that isn't -// guaranteed. -// -// TODO: what is this defined to be? Docs say "The host name of the -// instance." -func Hostname() (string, error) { - return getTrimmed("network-interfaces/0/ip") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { - var s []string - j, err := Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. 
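The metadata package being dropped resolves values by GETting http://169.254.169.254/computeMetadata/v1/<suffix> with a Metadata-Flavor: Google header, mapping a 404 to NotDefinedError. A stripped-down sketch of that request (error handling simplified; getMetadata is a local name, and the call only succeeds when run on a GCE instance):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// getMetadata is a minimal version of the Get function in the deleted
// metadata.go: link-local address plus the required Metadata-Flavor header.
func getMetadata(suffix string) (string, error) {
	req, err := http.NewRequest("GET", "http://169.254.169.254/computeMetadata/v1/"+suffix, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("metadata: status %d for %q", res.StatusCode, suffix)
	}
	b, err := ioutil.ReadAll(res.Body)
	return string(b), err
}

func main() {
	id, err := getMetadata("instance/id")
	fmt.Println(id, err)
}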
-func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go deleted file mode 100644 index 8b0db1b5da..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. -package internal - -import ( - "fmt" - "net/http" - "sync" - - "golang.org/x/net/context" -) - -type contextKey struct{} - -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - if c == nil { - panic("nil *http.Client passed to WithContext") - } - if projID == "" { - panic("empty project ID passed to WithContext") - } - return context.WithValue(parent, contextKey{}, &cloudContext{ - ProjectID: projID, - HTTPClient: c, - }) -} - -const userAgent = "gcloud-golang/0.1" - -type cloudContext struct { - ProjectID string - HTTPClient *http.Client - - mu sync.Mutex // guards svc - svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -} - -// Service returns the result of the fill function if it's never been -// called before for the given name (which is assumed to be an API -// service name, like "datastore"). If it has already been cached, the fill -// func is not run. -// It's safe for concurrent use by multiple goroutines. -func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { - return cc(ctx).service(name, fill) -} - -func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { - c.mu.Lock() - defer c.mu.Unlock() - - if c.svc == nil { - c.svc = make(map[string]interface{}) - } else if v, ok := c.svc[name]; ok { - return v - } - v := fill(c.HTTPClient) - c.svc[name] = v - return v -} - -// Transport is an http.RoundTripper that appends -// Google Cloud client's user-agent to the original -// request's user-agent header. -type Transport struct { - // Base represents the actual http.RoundTripper - // the requests will be delegated to. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
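cloud/internal/cloud.go stores its per-request state in a context.Context under an unexported struct{} key, so no other package can read or overwrite it. A minimal sketch of that key pattern, using the same golang.org/x/net/context package the deleted file imports (withContext and fromContext are local stand-ins):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
)

// contextKey follows the pattern in the deleted cloud.go: an unexported
// struct type as the context key prevents collisions across packages.
type contextKey struct{}

type cloudContext struct {
	ProjectID  string
	HTTPClient *http.Client
}

func withContext(parent context.Context, projID string, c *http.Client) context.Context {
	return context.WithValue(parent, contextKey{}, &cloudContext{ProjectID: projID, HTTPClient: c})
}

func fromContext(ctx context.Context) (*cloudContext, bool) {
	cc, ok := ctx.Value(contextKey{}).(*cloudContext)
	return cc, ok
}

func main() {
	ctx := withContext(context.Background(), "my-project", http.DefaultClient)
	if cc, ok := fromContext(ctx); ok {
		fmt.Println(cc.ProjectID)
	}
}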
-func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -func ProjID(ctx context.Context) string { - return cc(ctx).ProjectID -} - -func HTTPClient(ctx context.Context) *http.Client { - return cc(ctx).HTTPClient -} - -// cc returns the internal *cloudContext (cc) state for a context.Context. -// It panics if the user did it wrong. -func cc(ctx context.Context) *cloudContext { - if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { - return c - } - panic("invalid context.Context type; it should be created with cloud.NewContext") -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go deleted file mode 100644 index 9cb9be528c..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go +++ /dev/null @@ -1,1633 +0,0 @@ -// Code generated by protoc-gen-go. -// source: datastore_v1.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. - -It is generated from these files: - datastore_v1.proto - -It has these top-level messages: - PartitionId - Key - Value - Property - Entity - EntityResult - Query - KindExpression - PropertyReference - PropertyExpression - PropertyOrder - Filter - CompositeFilter - PropertyFilter - GqlQuery - GqlQueryArg - QueryResultBatch - Mutation - MutationResult - ReadOptions - LookupRequest - LookupResponse - RunQueryRequest - RunQueryResponse - BeginTransactionRequest - BeginTransactionResponse - RollbackRequest - RollbackResponse - CommitRequest - CommitResponse - AllocateIdsRequest - AllocateIdsResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -// Specifies what data the 'entity' field contains. -// A ResultType is either implied (for example, in LookupResponse.found it -// is always FULL) or specified by context (for example, in message -// QueryResultBatch, field 'entity_result_type' specifies a ResultType -// for all the values in field 'entity_result'). -type EntityResult_ResultType int32 - -const ( - EntityResult_FULL EntityResult_ResultType = 1 - EntityResult_PROJECTION EntityResult_ResultType = 2 - // The entity may have no key. - // A property value may have meaning 18. 
- EntityResult_KEY_ONLY EntityResult_ResultType = 3 -) - -var EntityResult_ResultType_name = map[int32]string{ - 1: "FULL", - 2: "PROJECTION", - 3: "KEY_ONLY", -} -var EntityResult_ResultType_value = map[string]int32{ - "FULL": 1, - "PROJECTION": 2, - "KEY_ONLY": 3, -} - -func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { - p := new(EntityResult_ResultType) - *p = x - return p -} -func (x EntityResult_ResultType) String() string { - return proto.EnumName(EntityResult_ResultType_name, int32(x)) -} -func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") - if err != nil { - return err - } - *x = EntityResult_ResultType(value) - return nil -} - -type PropertyExpression_AggregationFunction int32 - -const ( - PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 -) - -var PropertyExpression_AggregationFunction_name = map[int32]string{ - 1: "FIRST", -} -var PropertyExpression_AggregationFunction_value = map[string]int32{ - "FIRST": 1, -} - -func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { - p := new(PropertyExpression_AggregationFunction) - *p = x - return p -} -func (x PropertyExpression_AggregationFunction) String() string { - return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) -} -func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") - if err != nil { - return err - } - *x = PropertyExpression_AggregationFunction(value) - return nil -} - -type PropertyOrder_Direction int32 - -const ( - PropertyOrder_ASCENDING PropertyOrder_Direction = 1 - PropertyOrder_DESCENDING PropertyOrder_Direction = 2 -) - -var PropertyOrder_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var PropertyOrder_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { - p := new(PropertyOrder_Direction) - *p = x - return p -} -func (x PropertyOrder_Direction) String() string { - return proto.EnumName(PropertyOrder_Direction_name, int32(x)) -} -func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") - if err != nil { - return err - } - *x = PropertyOrder_Direction(value) - return nil -} - -type CompositeFilter_Operator int32 - -const ( - CompositeFilter_AND CompositeFilter_Operator = 1 -) - -var CompositeFilter_Operator_name = map[int32]string{ - 1: "AND", -} -var CompositeFilter_Operator_value = map[string]int32{ - "AND": 1, -} - -func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { - p := new(CompositeFilter_Operator) - *p = x - return p -} -func (x CompositeFilter_Operator) String() string { - return proto.EnumName(CompositeFilter_Operator_name, int32(x)) -} -func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") - if err != nil { - return err - } - *x = CompositeFilter_Operator(value) - return nil -} - -type PropertyFilter_Operator int32 - -const ( - PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 - PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 - 
PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 - PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 - PropertyFilter_EQUAL PropertyFilter_Operator = 5 - PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 -) - -var PropertyFilter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 11: "HAS_ANCESTOR", -} -var PropertyFilter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "HAS_ANCESTOR": 11, -} - -func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { - p := new(PropertyFilter_Operator) - *p = x - return p -} -func (x PropertyFilter_Operator) String() string { - return proto.EnumName(PropertyFilter_Operator_name, int32(x)) -} -func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") - if err != nil { - return err - } - *x = PropertyFilter_Operator(value) - return nil -} - -// The possible values for the 'more_results' field. -type QueryResultBatch_MoreResultsType int32 - -const ( - QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 - QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 - // results after the limit. - QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 -) - -var QueryResultBatch_MoreResultsType_name = map[int32]string{ - 1: "NOT_FINISHED", - 2: "MORE_RESULTS_AFTER_LIMIT", - 3: "NO_MORE_RESULTS", -} -var QueryResultBatch_MoreResultsType_value = map[string]int32{ - "NOT_FINISHED": 1, - "MORE_RESULTS_AFTER_LIMIT": 2, - "NO_MORE_RESULTS": 3, -} - -func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { - p := new(QueryResultBatch_MoreResultsType) - *p = x - return p -} -func (x QueryResultBatch_MoreResultsType) String() string { - return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) -} -func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") - if err != nil { - return err - } - *x = QueryResultBatch_MoreResultsType(value) - return nil -} - -type ReadOptions_ReadConsistency int32 - -const ( - ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 - ReadOptions_STRONG ReadOptions_ReadConsistency = 1 - ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 -) - -var ReadOptions_ReadConsistency_name = map[int32]string{ - 0: "DEFAULT", - 1: "STRONG", - 2: "EVENTUAL", -} -var ReadOptions_ReadConsistency_value = map[string]int32{ - "DEFAULT": 0, - "STRONG": 1, - "EVENTUAL": 2, -} - -func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { - p := new(ReadOptions_ReadConsistency) - *p = x - return p -} -func (x ReadOptions_ReadConsistency) String() string { - return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) -} -func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") - if err != nil { - return err - } - *x = ReadOptions_ReadConsistency(value) - return nil -} - -type BeginTransactionRequest_IsolationLevel int32 - -const ( - BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 - // conflict if their mutations conflict. 
For example: - // Read(A),Write(B) may not conflict with Read(B),Write(A), - // but Read(B),Write(B) does conflict with Read(B),Write(B). - BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 -) - -var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ - 0: "SNAPSHOT", - 1: "SERIALIZABLE", -} -var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ - "SNAPSHOT": 0, - "SERIALIZABLE": 1, -} - -func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { - p := new(BeginTransactionRequest_IsolationLevel) - *p = x - return p -} -func (x BeginTransactionRequest_IsolationLevel) String() string { - return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) -} -func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") - if err != nil { - return err - } - *x = BeginTransactionRequest_IsolationLevel(value) - return nil -} - -type CommitRequest_Mode int32 - -const ( - CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 - CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 -) - -var CommitRequest_Mode_name = map[int32]string{ - 1: "TRANSACTIONAL", - 2: "NON_TRANSACTIONAL", -} -var CommitRequest_Mode_value = map[string]int32{ - "TRANSACTIONAL": 1, - "NON_TRANSACTIONAL": 2, -} - -func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { - p := new(CommitRequest_Mode) - *p = x - return p -} -func (x CommitRequest_Mode) String() string { - return proto.EnumName(CommitRequest_Mode_name, int32(x)) -} -func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") - if err != nil { - return err - } - *x = CommitRequest_Mode(value) - return nil -} - -// An identifier for a particular subset of entities. -// -// Entities are partitioned into various subsets, each used by different -// datasets and different namespaces within a dataset and so forth. -// -// All input partition IDs are normalized before use. -// A partition ID is normalized as follows: -// If the partition ID is unset or is set to an empty partition ID, replace it -// with the context partition ID. -// Otherwise, if the partition ID has no dataset ID, assign it the context -// partition ID's dataset ID. -// Unless otherwise documented, the context partition ID has the dataset ID set -// to the context dataset ID and no other partition dimension set. -// -// A partition ID is empty if all of its fields are unset. -// -// Partition dimension: -// A dimension may be unset. -// A dimension's value must never be "". -// A dimension's value must match [A-Za-z\d\.\-_]{1,100} -// If the value of any dimension matches regex "__.*__", -// the partition is reserved/read-only. -// A reserved/read-only partition ID is forbidden in certain documented contexts. -// -// Dataset ID: -// A dataset id's value must never be "". -// A dataset id's value must match -// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} -type PartitionId struct { - // The dataset ID. - DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` - // The namespace. 
- Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PartitionId) Reset() { *m = PartitionId{} } -func (m *PartitionId) String() string { return proto.CompactTextString(m) } -func (*PartitionId) ProtoMessage() {} - -func (m *PartitionId) GetDatasetId() string { - if m != nil && m.DatasetId != nil { - return *m.DatasetId - } - return "" -} - -func (m *PartitionId) GetNamespace() string { - if m != nil && m.Namespace != nil { - return *m.Namespace - } - return "" -} - -// A unique identifier for an entity. -// If a key's partition id or any of its path kinds or names are -// reserved/read-only, the key is reserved/read-only. -// A reserved/read-only key is forbidden in certain documented contexts. -type Key struct { - // Entities are partitioned into subsets, currently identified by a dataset - // (usually implicitly specified by the project) and namespace ID. - // Queries are scoped to a single partition. - PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` - // The entity path. - // An entity path consists of one or more elements composed of a kind and a - // string or numerical identifier, which identify entities. The first - // element identifies a root entity, the second element identifies - // a child of the root entity, the third element a child of the - // second entity, and so forth. The entities identified by all prefixes of - // the path are called the element's ancestors. - // An entity path is always fully complete: ALL of the entity's ancestors - // are required to be in the path along with the entity identifier itself. - // The only exception is that in some documented cases, the identifier in the - // last path element (for the entity) itself may be omitted. A path can never - // be empty. - PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Key) Reset() { *m = Key{} } -func (m *Key) String() string { return proto.CompactTextString(m) } -func (*Key) ProtoMessage() {} - -func (m *Key) GetPartitionId() *PartitionId { - if m != nil { - return m.PartitionId - } - return nil -} - -func (m *Key) GetPathElement() []*Key_PathElement { - if m != nil { - return m.PathElement - } - return nil -} - -// A (kind, ID/name) pair used to construct a key path. -// -// At most one of name or ID may be set. -// If either is set, the element is complete. -// If neither is set, the element is incomplete. -type Key_PathElement struct { - // The kind of the entity. - // A kind matching regex "__.*__" is reserved/read-only. - // A kind must not contain more than 500 characters. - // Cannot be "". - Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` - // The ID of the entity. - // Never equal to zero. Values less than zero are discouraged and will not - // be supported in the future. - Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` - // The name of the entity. - // A name matching regex "__.*__" is reserved/read-only. - // A name must not be more than 500 characters. - // Cannot be "". 
- Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } -func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } -func (*Key_PathElement) ProtoMessage() {} - -func (m *Key_PathElement) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Key_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Key_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// A message that can hold any of the supported value types and associated -// metadata. -// -// At most one of the Value fields may be set. -// If none are set the value is "null". -// -type Value struct { - // A boolean value. - BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` - // An integer value. - IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` - // A double value. - DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` - // A timestamp value. - TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` - // A key value. - KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` - // A blob key value. - BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` - // A UTF-8 encoded string value. - StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` - // A blob value. - BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` - // An entity value. - // May have no key. - // May have a key with an incomplete key path. - // May have a reserved/read-only key. - EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` - // A list value. - // Cannot contain another list value. - // Cannot also have a meaning and indexing set. - ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` - // The meaning field is reserved and should not be used. - Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` - // If the value should be indexed. - // - // The indexed property may be set for a - // null value. - // When indexed is true, stringValue - // is limited to 500 characters and the blob value is limited to 500 bytes. - // Exception: If meaning is set to 2, string_value is limited to 2038 - // characters regardless of indexed. - // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 - // will be ignored on input (and will never be set on output). - // Input values by default have indexed set to - // true; however, you can explicitly set indexed to - // true if you want. (An output value never has - // indexed explicitly set to true.) If a value is - // itself an entity, it cannot have indexed set to - // true. - // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
- Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} - -const Default_Value_Indexed bool = true - -func (m *Value) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *Value) GetIntegerValue() int64 { - if m != nil && m.IntegerValue != nil { - return *m.IntegerValue - } - return 0 -} - -func (m *Value) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *Value) GetTimestampMicrosecondsValue() int64 { - if m != nil && m.TimestampMicrosecondsValue != nil { - return *m.TimestampMicrosecondsValue - } - return 0 -} - -func (m *Value) GetKeyValue() *Key { - if m != nil { - return m.KeyValue - } - return nil -} - -func (m *Value) GetBlobKeyValue() string { - if m != nil && m.BlobKeyValue != nil { - return *m.BlobKeyValue - } - return "" -} - -func (m *Value) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *Value) GetBlobValue() []byte { - if m != nil { - return m.BlobValue - } - return nil -} - -func (m *Value) GetEntityValue() *Entity { - if m != nil { - return m.EntityValue - } - return nil -} - -func (m *Value) GetListValue() []*Value { - if m != nil { - return m.ListValue - } - return nil -} - -func (m *Value) GetMeaning() int32 { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return 0 -} - -func (m *Value) GetIndexed() bool { - if m != nil && m.Indexed != nil { - return *m.Indexed - } - return Default_Value_Indexed -} - -// An entity property. -type Property struct { - // The name of the property. - // A property name matching regex "__.*__" is reserved. - // A reserved property name is forbidden in certain documented contexts. - // The name must not contain more than 500 characters. - // Cannot be "". - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - // The value(s) of the property. - // Each value can have only one value property populated. For example, - // you cannot have a values list of { value: { integerValue: 22, - // stringValue: "a" } }, but you can have { value: { listValue: - // [ { integerValue: 22 }, { stringValue: "a" } ] }. - Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *Value { - if m != nil { - return m.Value - } - return nil -} - -// An entity. -// -// An entity is limited to 1 megabyte when stored. That roughly -// corresponds to a limit of 1 megabyte for the serialized form of this -// message. -type Entity struct { - // The entity's key. - // - // An entity must have a key, unless otherwise documented (for example, - // an entity in Value.entityValue may have no key). - // An entity's kind is its key's path's last element's kind, - // or null if it has no key. - Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - // The entity's properties. - // Each property's name must be unique for its entity. 
- Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Entity) Reset() { *m = Entity{} } -func (m *Entity) String() string { return proto.CompactTextString(m) } -func (*Entity) ProtoMessage() {} - -func (m *Entity) GetKey() *Key { - if m != nil { - return m.Key - } - return nil -} - -func (m *Entity) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -// The result of fetching an entity from the datastore. -type EntityResult struct { - // The resulting entity. - Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityResult) Reset() { *m = EntityResult{} } -func (m *EntityResult) String() string { return proto.CompactTextString(m) } -func (*EntityResult) ProtoMessage() {} - -func (m *EntityResult) GetEntity() *Entity { - if m != nil { - return m.Entity - } - return nil -} - -// A query. -type Query struct { - // The projection to return. If not set the entire entity is returned. - Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` - // The kinds to query (if empty, returns entities from all kinds). - Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` - // The filter to apply (optional). - Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` - // The order to apply to the query results (if empty, order is unspecified). - Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` - // The properties to group by (if empty, no grouping is applied to the - // result set). - GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` - // A starting point for the query results. Optional. Query cursors are - // returned in query result batches. - StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` - // An ending point for the query results. Optional. Query cursors are - // returned in query result batches. - EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` - // The number of results to skip. Applies before limit, but after all other - // constraints (optional, defaults to 0). - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - // The maximum number of results to return. Applies after all other - // constraints. Optional. 
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 - -func (m *Query) GetProjection() []*PropertyExpression { - if m != nil { - return m.Projection - } - return nil -} - -func (m *Query) GetKind() []*KindExpression { - if m != nil { - return m.Kind - } - return nil -} - -func (m *Query) GetFilter() *Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetOrder() []*PropertyOrder { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetGroupBy() []*PropertyReference { - if m != nil { - return m.GroupBy - } - return nil -} - -func (m *Query) GetStartCursor() []byte { - if m != nil { - return m.StartCursor - } - return nil -} - -func (m *Query) GetEndCursor() []byte { - if m != nil { - return m.EndCursor - } - return nil -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -// A representation of a kind. -type KindExpression struct { - // The name of the kind. - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *KindExpression) Reset() { *m = KindExpression{} } -func (m *KindExpression) String() string { return proto.CompactTextString(m) } -func (*KindExpression) ProtoMessage() {} - -func (m *KindExpression) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// A reference to a property relative to the kind expressions. -// exactly. -type PropertyReference struct { - // The name of the property. - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyReference) Reset() { *m = PropertyReference{} } -func (m *PropertyReference) String() string { return proto.CompactTextString(m) } -func (*PropertyReference) ProtoMessage() {} - -func (m *PropertyReference) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// A representation of a property in a projection. -type PropertyExpression struct { - // The property to project. - Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` - // The aggregation function to apply to the property. Optional. - // Can only be used when grouping by at least one property. Must - // then be set on all properties in the projection that are not - // being grouped by. 
- AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } -func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } -func (*PropertyExpression) ProtoMessage() {} - -func (m *PropertyExpression) GetProperty() *PropertyReference { - if m != nil { - return m.Property - } - return nil -} - -func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { - if m != nil && m.AggregationFunction != nil { - return *m.AggregationFunction - } - return PropertyExpression_FIRST -} - -// The desired order for a specific property. -type PropertyOrder struct { - // The property to order by. - Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` - // The direction to order by. - Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } -func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } -func (*PropertyOrder) ProtoMessage() {} - -const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING - -func (m *PropertyOrder) GetProperty() *PropertyReference { - if m != nil { - return m.Property - } - return nil -} - -func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_PropertyOrder_Direction -} - -// A holder for any type of filter. Exactly one field should be specified. -type Filter struct { - // A composite filter. - CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` - // A filter on a property. - PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Filter) Reset() { *m = Filter{} } -func (m *Filter) String() string { return proto.CompactTextString(m) } -func (*Filter) ProtoMessage() {} - -func (m *Filter) GetCompositeFilter() *CompositeFilter { - if m != nil { - return m.CompositeFilter - } - return nil -} - -func (m *Filter) GetPropertyFilter() *PropertyFilter { - if m != nil { - return m.PropertyFilter - } - return nil -} - -// A filter that merges the multiple other filters using the given operation. -type CompositeFilter struct { - // The operator for combining multiple filters. - Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"` - // The list of filters to combine. - // Must contain at least one filter. 
- Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } -func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } -func (*CompositeFilter) ProtoMessage() {} - -func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { - if m != nil && m.Operator != nil { - return *m.Operator - } - return CompositeFilter_AND -} - -func (m *CompositeFilter) GetFilter() []*Filter { - if m != nil { - return m.Filter - } - return nil -} - -// A filter on a specific property. -type PropertyFilter struct { - // The property to filter by. - Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` - // The operator to filter by. - Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"` - // The value to compare the property to. - Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } -func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } -func (*PropertyFilter) ProtoMessage() {} - -func (m *PropertyFilter) GetProperty() *PropertyReference { - if m != nil { - return m.Property - } - return nil -} - -func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { - if m != nil && m.Operator != nil { - return *m.Operator - } - return PropertyFilter_LESS_THAN -} - -func (m *PropertyFilter) GetValue() *Value { - if m != nil { - return m.Value - } - return nil -} - -// A GQL query. -type GqlQuery struct { - QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` - // When false, the query string must not contain a literal. - AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` - // A named argument must set field GqlQueryArg.name. - // No two named arguments may have the same name. - // For each non-reserved named binding site in the query string, - // there must be a named argument with that name, - // but not necessarily the inverse. - NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` - // Numbered binding site @1 references the first numbered argument, - // effectively using 1-based indexing, rather than the usual 0. - // A numbered argument must NOT set field GqlQueryArg.name. - // For each binding site numbered i in query_string, - // there must be an ith numbered argument. - // The inverse must also be true. 
- NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GqlQuery) Reset() { *m = GqlQuery{} } -func (m *GqlQuery) String() string { return proto.CompactTextString(m) } -func (*GqlQuery) ProtoMessage() {} - -const Default_GqlQuery_AllowLiteral bool = false - -func (m *GqlQuery) GetQueryString() string { - if m != nil && m.QueryString != nil { - return *m.QueryString - } - return "" -} - -func (m *GqlQuery) GetAllowLiteral() bool { - if m != nil && m.AllowLiteral != nil { - return *m.AllowLiteral - } - return Default_GqlQuery_AllowLiteral -} - -func (m *GqlQuery) GetNameArg() []*GqlQueryArg { - if m != nil { - return m.NameArg - } - return nil -} - -func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { - if m != nil { - return m.NumberArg - } - return nil -} - -// A binding argument for a GQL query. -// Exactly one of fields value and cursor must be set. -type GqlQueryArg struct { - // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". - // Must not match regex "__.*__". - // Must not be "". - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } -func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } -func (*GqlQueryArg) ProtoMessage() {} - -func (m *GqlQueryArg) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *GqlQueryArg) GetValue() *Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *GqlQueryArg) GetCursor() []byte { - if m != nil { - return m.Cursor - } - return nil -} - -// A batch of results produced by a query. -type QueryResultBatch struct { - // The result type for every entity in entityResults. - EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"` - // The results for this batch. - EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` - // A cursor that points to the position after the last result in the batch. - // May be absent. - EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` - // The state of the query after the current batch. - MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` - // The number of results skipped because of Query.offset. 
- SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } -func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } -func (*QueryResultBatch) ProtoMessage() {} - -func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { - if m != nil && m.EntityResultType != nil { - return *m.EntityResultType - } - return EntityResult_FULL -} - -func (m *QueryResultBatch) GetEntityResult() []*EntityResult { - if m != nil { - return m.EntityResult - } - return nil -} - -func (m *QueryResultBatch) GetEndCursor() []byte { - if m != nil { - return m.EndCursor - } - return nil -} - -func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return QueryResultBatch_NOT_FINISHED -} - -func (m *QueryResultBatch) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -// A set of changes to apply. -// -// No entity in this message may have a reserved property name, -// not even a property in an entity in a value. -// No value in this message may have meaning 18, -// not even a value in an entity in another value. -// -// If entities with duplicate keys are present, an arbitrary choice will -// be made as to which is written. -type Mutation struct { - // Entities to upsert. - // Each upserted entity's key must have a complete path and - // must not be reserved/read-only. - Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` - // Entities to update. - // Each updated entity's key must have a complete path and - // must not be reserved/read-only. - Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` - // Entities to insert. - // Each inserted entity's key must have a complete path and - // must not be reserved/read-only. - Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` - // Insert entities with a newly allocated ID. - // Each inserted entity's key must omit the final identifier in its path and - // must not be reserved/read-only. - InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` - // Keys of entities to delete. - // Each key must have a complete key path and must not be reserved/read-only. - Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` - // Ignore a user specified read-only period. Optional. - Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Mutation) Reset() { *m = Mutation{} } -func (m *Mutation) String() string { return proto.CompactTextString(m) } -func (*Mutation) ProtoMessage() {} - -func (m *Mutation) GetUpsert() []*Entity { - if m != nil { - return m.Upsert - } - return nil -} - -func (m *Mutation) GetUpdate() []*Entity { - if m != nil { - return m.Update - } - return nil -} - -func (m *Mutation) GetInsert() []*Entity { - if m != nil { - return m.Insert - } - return nil -} - -func (m *Mutation) GetInsertAutoId() []*Entity { - if m != nil { - return m.InsertAutoId - } - return nil -} - -func (m *Mutation) GetDelete() []*Key { - if m != nil { - return m.Delete - } - return nil -} - -func (m *Mutation) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return false -} - -// The result of applying a mutation. 
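The generated code deleted above follows the usual protoc-gen-go proto2 conventions: every optional field is a pointer, each `GetX` method is nil-safe on both the receiver and the field, and declared defaults become `Default_<Message>_<Field>` constants such as `Default_Query_Offset` and `Default_GqlQuery_AllowLiteral`. A minimal standalone sketch of that pattern, using a made-up `Page` type rather than the real generated messages, shows why callers can chain these getters without nil checks of their own:

```Go
package main

import "fmt"

// Page mirrors the shape of a proto2-generated struct: optional scalar
// fields are pointers so "unset" and "zero" can be distinguished.
type Page struct {
	Offset *int32 // optional, declared default 0
	Limit  *int32 // optional, no declared default
}

// Default_Page_Offset plays the role of the generated default constant.
const Default_Page_Offset int32 = 0

// GetOffset is nil-safe on both the receiver and the field and falls back
// to the declared default, like the deleted Query.GetOffset above.
func (m *Page) GetOffset() int32 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return Default_Page_Offset
}

// GetLimit has no declared default, so it falls back to the zero value.
func (m *Page) GetLimit() int32 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return 0
}

func main() {
	var unset *Page // a nil receiver is safe
	limit := int32(20)
	set := &Page{Limit: &limit}

	fmt.Println(unset.GetOffset(), unset.GetLimit()) // 0 0
	fmt.Println(set.GetOffset(), set.GetLimit())     // 0 20
}
```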
-type MutationResult struct { - // Number of index writes. - IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` - // Keys for insertAutoId entities. One per entity from the - // request, in the same order. - InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MutationResult) Reset() { *m = MutationResult{} } -func (m *MutationResult) String() string { return proto.CompactTextString(m) } -func (*MutationResult) ProtoMessage() {} - -func (m *MutationResult) GetIndexUpdates() int32 { - if m != nil && m.IndexUpdates != nil { - return *m.IndexUpdates - } - return 0 -} - -func (m *MutationResult) GetInsertAutoIdKey() []*Key { - if m != nil { - return m.InsertAutoIdKey - } - return nil -} - -// Options shared by read requests. -type ReadOptions struct { - // The read consistency to use. - // Cannot be set when transaction is set. - // Lookup and ancestor queries default to STRONG, global queries default to - // EVENTUAL and cannot be set to STRONG. - ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` - // The transaction to use. Optional. - Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReadOptions) Reset() { *m = ReadOptions{} } -func (m *ReadOptions) String() string { return proto.CompactTextString(m) } -func (*ReadOptions) ProtoMessage() {} - -const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT - -func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { - if m != nil && m.ReadConsistency != nil { - return *m.ReadConsistency - } - return Default_ReadOptions_ReadConsistency -} - -func (m *ReadOptions) GetTransaction() []byte { - if m != nil { - return m.Transaction - } - return nil -} - -// The request for Lookup. -type LookupRequest struct { - // Options for this lookup request. Optional. - ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` - // Keys of entities to look up from the datastore. - Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LookupRequest) Reset() { *m = LookupRequest{} } -func (m *LookupRequest) String() string { return proto.CompactTextString(m) } -func (*LookupRequest) ProtoMessage() {} - -func (m *LookupRequest) GetReadOptions() *ReadOptions { - if m != nil { - return m.ReadOptions - } - return nil -} - -func (m *LookupRequest) GetKey() []*Key { - if m != nil { - return m.Key - } - return nil -} - -// The response for Lookup. -type LookupResponse struct { - // Entities found as ResultType.FULL entities. - Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` - // Entities not found as ResultType.KEY_ONLY entities. - Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` - // A list of keys that were not looked up due to resource constraints. 
- Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LookupResponse) Reset() { *m = LookupResponse{} } -func (m *LookupResponse) String() string { return proto.CompactTextString(m) } -func (*LookupResponse) ProtoMessage() {} - -func (m *LookupResponse) GetFound() []*EntityResult { - if m != nil { - return m.Found - } - return nil -} - -func (m *LookupResponse) GetMissing() []*EntityResult { - if m != nil { - return m.Missing - } - return nil -} - -func (m *LookupResponse) GetDeferred() []*Key { - if m != nil { - return m.Deferred - } - return nil -} - -// The request for RunQuery. -type RunQueryRequest struct { - // The options for this query. - ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` - // Entities are partitioned into subsets, identified by a dataset (usually - // implicitly specified by the project) and namespace ID. Queries are scoped - // to a single partition. - // This partition ID is normalized with the standard default context - // partition ID, but all other partition IDs in RunQueryRequest are - // normalized with this partition ID as the context partition ID. - PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` - // The query to run. - // Either this field or field gql_query must be set, but not both. - Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` - // The GQL query to run. - // Either this field or field query must be set, but not both. - GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } -func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } -func (*RunQueryRequest) ProtoMessage() {} - -func (m *RunQueryRequest) GetReadOptions() *ReadOptions { - if m != nil { - return m.ReadOptions - } - return nil -} - -func (m *RunQueryRequest) GetPartitionId() *PartitionId { - if m != nil { - return m.PartitionId - } - return nil -} - -func (m *RunQueryRequest) GetQuery() *Query { - if m != nil { - return m.Query - } - return nil -} - -func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { - if m != nil { - return m.GqlQuery - } - return nil -} - -// The response for RunQuery. -type RunQueryResponse struct { - // A batch of query results (always present). - Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } -func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } -func (*RunQueryResponse) ProtoMessage() {} - -func (m *RunQueryResponse) GetBatch() *QueryResultBatch { - if m != nil { - return m.Batch - } - return nil -} - -// The request for BeginTransaction. -type BeginTransactionRequest struct { - // The transaction isolation level. 
- IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT - -func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { - if m != nil && m.IsolationLevel != nil { - return *m.IsolationLevel - } - return Default_BeginTransactionRequest_IsolationLevel -} - -// The response for BeginTransaction. -type BeginTransactionResponse struct { - // The transaction identifier (always present). - Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } -func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionResponse) ProtoMessage() {} - -func (m *BeginTransactionResponse) GetTransaction() []byte { - if m != nil { - return m.Transaction - } - return nil -} - -// The request for Rollback. -type RollbackRequest struct { - // The transaction identifier, returned by a call to - // beginTransaction. - Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } -func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } -func (*RollbackRequest) ProtoMessage() {} - -func (m *RollbackRequest) GetTransaction() []byte { - if m != nil { - return m.Transaction - } - return nil -} - -// The response for Rollback. -type RollbackResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } -func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } -func (*RollbackResponse) ProtoMessage() {} - -// The request for Commit. -type CommitRequest struct { - // The transaction identifier, returned by a call to - // beginTransaction. Must be set when mode is TRANSACTIONAL. - Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` - // The mutation to perform. Optional. - Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` - // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
- Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitRequest) Reset() { *m = CommitRequest{} } -func (m *CommitRequest) String() string { return proto.CompactTextString(m) } -func (*CommitRequest) ProtoMessage() {} - -const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL - -func (m *CommitRequest) GetTransaction() []byte { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *CommitRequest) GetMutation() *Mutation { - if m != nil { - return m.Mutation - } - return nil -} - -func (m *CommitRequest) GetMode() CommitRequest_Mode { - if m != nil && m.Mode != nil { - return *m.Mode - } - return Default_CommitRequest_Mode -} - -// The response for Commit. -type CommitResponse struct { - // The result of performing the mutation (if any). - MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetMutationResult() *MutationResult { - if m != nil { - return m.MutationResult - } - return nil -} - -// The request for AllocateIds. -type AllocateIdsRequest struct { - // A list of keys with incomplete key paths to allocate IDs for. - // No key may be reserved/read-only. - Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetKey() []*Key { - if m != nil { - return m.Key - } - return nil -} - -// The response for AllocateIds. -type AllocateIdsResponse struct { - // The keys specified in the request (in the same order), each with - // its key path completed with a newly allocated ID. 
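Taken together, the request types deleted above describe the datastore write path: a transaction is opened with `BeginTransactionRequest`, entity changes are batched into a `Mutation`, and the batch is applied with a `CommitRequest` whose `Mode` defaults to `TRANSACTIONAL`. A rough sketch of how those messages fit together follows; it only assembles the request (transport is out of scope), the import path points at the vendored package this diff removes, so the snippet is illustrative rather than buildable against the new tree, and only the field and constant names come from the code above:

```Go
package datastoreexample

import (
	pb "google.golang.org/cloud/internal/datastore" // vendored package removed in this diff
)

// buildCommit assembles a transactional CommitRequest from a transaction
// identifier (as returned by BeginTransactionResponse.GetTransaction) and a
// set of entities to upsert. Each upserted entity's key must be complete
// and must not be reserved/read-only, per the Mutation comments above.
func buildCommit(txn []byte, entities []*pb.Entity) *pb.CommitRequest {
	mode := pb.CommitRequest_TRANSACTIONAL // also the declared default
	return &pb.CommitRequest{
		Transaction: txn,
		Mutation: &pb.Mutation{
			Upsert: entities,
		},
		Mode: &mode,
	}
}
```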
- Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetKey() []*Key { - if m != nil { - return m.Key - } - return nil -} - -func init() { - proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) - proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) - proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) - proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) - proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) - proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) - proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) - proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) - proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto deleted file mode 100644 index 36bb9df7bb..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2013 Google Inc. All Rights Reserved. -// -// The datastore v1 service proto definitions - -syntax = "proto2"; - -package datastore; -option java_package = "com.google.api.services.datastore"; - - -// An identifier for a particular subset of entities. -// -// Entities are partitioned into various subsets, each used by different -// datasets and different namespaces within a dataset and so forth. -// -// All input partition IDs are normalized before use. -// A partition ID is normalized as follows: -// If the partition ID is unset or is set to an empty partition ID, replace it -// with the context partition ID. -// Otherwise, if the partition ID has no dataset ID, assign it the context -// partition ID's dataset ID. -// Unless otherwise documented, the context partition ID has the dataset ID set -// to the context dataset ID and no other partition dimension set. -// -// A partition ID is empty if all of its fields are unset. -// -// Partition dimension: -// A dimension may be unset. -// A dimension's value must never be "". -// A dimension's value must match [A-Za-z\d\.\-_]{1,100} -// If the value of any dimension matches regex "__.*__", -// the partition is reserved/read-only. -// A reserved/read-only partition ID is forbidden in certain documented contexts. -// -// Dataset ID: -// A dataset id's value must never be "". -// A dataset id's value must match -// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} -message PartitionId { - // The dataset ID. - optional string dataset_id = 3; - // The namespace. 
- optional string namespace = 4; -} - -// A unique identifier for an entity. -// If a key's partition id or any of its path kinds or names are -// reserved/read-only, the key is reserved/read-only. -// A reserved/read-only key is forbidden in certain documented contexts. -message Key { - // Entities are partitioned into subsets, currently identified by a dataset - // (usually implicitly specified by the project) and namespace ID. - // Queries are scoped to a single partition. - optional PartitionId partition_id = 1; - - // A (kind, ID/name) pair used to construct a key path. - // - // At most one of name or ID may be set. - // If either is set, the element is complete. - // If neither is set, the element is incomplete. - message PathElement { - // The kind of the entity. - // A kind matching regex "__.*__" is reserved/read-only. - // A kind must not contain more than 500 characters. - // Cannot be "". - required string kind = 1; - // The ID of the entity. - // Never equal to zero. Values less than zero are discouraged and will not - // be supported in the future. - optional int64 id = 2; - // The name of the entity. - // A name matching regex "__.*__" is reserved/read-only. - // A name must not be more than 500 characters. - // Cannot be "". - optional string name = 3; - } - - // The entity path. - // An entity path consists of one or more elements composed of a kind and a - // string or numerical identifier, which identify entities. The first - // element identifies a root entity, the second element identifies - // a child of the root entity, the third element a child of the - // second entity, and so forth. The entities identified by all prefixes of - // the path are called the element's ancestors. - // An entity path is always fully complete: ALL of the entity's ancestors - // are required to be in the path along with the entity identifier itself. - // The only exception is that in some documented cases, the identifier in the - // last path element (for the entity) itself may be omitted. A path can never - // be empty. - repeated PathElement path_element = 2; -} - -// A message that can hold any of the supported value types and associated -// metadata. -// -// At most one of the Value fields may be set. -// If none are set the value is "null". -// -message Value { - // A boolean value. - optional bool boolean_value = 1; - // An integer value. - optional int64 integer_value = 2; - // A double value. - optional double double_value = 3; - // A timestamp value. - optional int64 timestamp_microseconds_value = 4; - // A key value. - optional Key key_value = 5; - // A blob key value. - optional string blob_key_value = 16; - // A UTF-8 encoded string value. - optional string string_value = 17; - // A blob value. - optional bytes blob_value = 18; - // An entity value. - // May have no key. - // May have a key with an incomplete key path. - // May have a reserved/read-only key. - optional Entity entity_value = 6; - // A list value. - // Cannot contain another list value. - // Cannot also have a meaning and indexing set. - repeated Value list_value = 7; - - // The meaning field is reserved and should not be used. - optional int32 meaning = 14; - - // If the value should be indexed. - // - // The indexed property may be set for a - // null value. - // When indexed is true, stringValue - // is limited to 500 characters and the blob value is limited to 500 bytes. - // Exception: If meaning is set to 2, string_value is limited to 2038 - // characters regardless of indexed. 
- // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 - // will be ignored on input (and will never be set on output). - // Input values by default have indexed set to - // true; however, you can explicitly set indexed to - // true if you want. (An output value never has - // indexed explicitly set to true.) If a value is - // itself an entity, it cannot have indexed set to - // true. - // Exception: An entity value with meaning 9, 20 or 21 may be indexed. - optional bool indexed = 15 [default = true]; -} - -// An entity property. -message Property { - // The name of the property. - // A property name matching regex "__.*__" is reserved. - // A reserved property name is forbidden in certain documented contexts. - // The name must not contain more than 500 characters. - // Cannot be "". - required string name = 1; - - // The value(s) of the property. - // Each value can have only one value property populated. For example, - // you cannot have a values list of { value: { integerValue: 22, - // stringValue: "a" } }, but you can have { value: { listValue: - // [ { integerValue: 22 }, { stringValue: "a" } ] }. - required Value value = 4; -} - -// An entity. -// -// An entity is limited to 1 megabyte when stored. That roughly -// corresponds to a limit of 1 megabyte for the serialized form of this -// message. -message Entity { - // The entity's key. - // - // An entity must have a key, unless otherwise documented (for example, - // an entity in Value.entityValue may have no key). - // An entity's kind is its key's path's last element's kind, - // or null if it has no key. - optional Key key = 1; - // The entity's properties. - // Each property's name must be unique for its entity. - repeated Property property = 2; -} - -// The result of fetching an entity from the datastore. -message EntityResult { - // Specifies what data the 'entity' field contains. - // A ResultType is either implied (for example, in LookupResponse.found it - // is always FULL) or specified by context (for example, in message - // QueryResultBatch, field 'entity_result_type' specifies a ResultType - // for all the values in field 'entity_result'). - enum ResultType { - FULL = 1; // The entire entity. - PROJECTION = 2; // A projected subset of properties. - // The entity may have no key. - // A property value may have meaning 18. - KEY_ONLY = 3; // Only the key. - } - - // The resulting entity. - required Entity entity = 1; -} - -// A query. -message Query { - // The projection to return. If not set the entire entity is returned. - repeated PropertyExpression projection = 2; - - // The kinds to query (if empty, returns entities from all kinds). - repeated KindExpression kind = 3; - - // The filter to apply (optional). - optional Filter filter = 4; - - // The order to apply to the query results (if empty, order is unspecified). - repeated PropertyOrder order = 5; - - // The properties to group by (if empty, no grouping is applied to the - // result set). - repeated PropertyReference group_by = 6; - - // A starting point for the query results. Optional. Query cursors are - // returned in query result batches. - optional bytes /* serialized QueryCursor */ start_cursor = 7; - - // An ending point for the query results. Optional. Query cursors are - // returned in query result batches. - optional bytes /* serialized QueryCursor */ end_cursor = 8; - - // The number of results to skip. Applies before limit, but after all other - // constraints (optional, defaults to 0). 
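The offset/limit comment above is easy to misread: the skip is consumed before the cap, so `offset = 10` with `limit = 5` returns results 11 through 15 of the constrained, ordered result set rather than its first five. A trivial standalone illustration of that ordering is below; the slice stands in for results that are already filtered and ordered, and the function name and values are invented for the example:

```Go
package main

import "fmt"

// applyOffsetLimit mimics the documented ordering of Query.offset and
// Query.limit: skip first, then cap the number of results returned.
func applyOffsetLimit(results []string, offset, limit int) []string {
	if offset > len(results) {
		offset = len(results)
	}
	results = results[offset:]
	if limit >= 0 && limit < len(results) {
		results = results[:limit]
	}
	return results
}

func main() {
	rows := []string{"a", "b", "c", "d", "e", "f", "g"}
	fmt.Println(applyOffsetLimit(rows, 2, 3)) // [c d e]
}
```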
- optional int32 offset = 10 [default=0]; - - // The maximum number of results to return. Applies after all other - // constraints. Optional. - optional int32 limit = 11; -} - -// A representation of a kind. -message KindExpression { - // The name of the kind. - required string name = 1; -} - -// A reference to a property relative to the kind expressions. -// exactly. -message PropertyReference { - // The name of the property. - required string name = 2; -} - -// A representation of a property in a projection. -message PropertyExpression { - enum AggregationFunction { - FIRST = 1; - } - // The property to project. - required PropertyReference property = 1; - // The aggregation function to apply to the property. Optional. - // Can only be used when grouping by at least one property. Must - // then be set on all properties in the projection that are not - // being grouped by. - optional AggregationFunction aggregation_function = 2; -} - -// The desired order for a specific property. -message PropertyOrder { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - // The property to order by. - required PropertyReference property = 1; - // The direction to order by. - optional Direction direction = 2 [default=ASCENDING]; -} - -// A holder for any type of filter. Exactly one field should be specified. -message Filter { - // A composite filter. - optional CompositeFilter composite_filter = 1; - // A filter on a property. - optional PropertyFilter property_filter = 2; -} - -// A filter that merges the multiple other filters using the given operation. -message CompositeFilter { - enum Operator { - AND = 1; - } - - // The operator for combining multiple filters. - required Operator operator = 1; - // The list of filters to combine. - // Must contain at least one filter. - repeated Filter filter = 2; -} - -// A filter on a specific property. -message PropertyFilter { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - - HAS_ANCESTOR = 11; - } - - // The property to filter by. - required PropertyReference property = 1; - // The operator to filter by. - required Operator operator = 2; - // The value to compare the property to. - required Value value = 3; -} - -// A GQL query. -message GqlQuery { - required string query_string = 1; - // When false, the query string must not contain a literal. - optional bool allow_literal = 2 [default = false]; - // A named argument must set field GqlQueryArg.name. - // No two named arguments may have the same name. - // For each non-reserved named binding site in the query string, - // there must be a named argument with that name, - // but not necessarily the inverse. - repeated GqlQueryArg name_arg = 3; - // Numbered binding site @1 references the first numbered argument, - // effectively using 1-based indexing, rather than the usual 0. - // A numbered argument must NOT set field GqlQueryArg.name. - // For each binding site numbered i in query_string, - // there must be an ith numbered argument. - // The inverse must also be true. - repeated GqlQueryArg number_arg = 4; -} - -// A binding argument for a GQL query. -// Exactly one of fields value and cursor must be set. -message GqlQueryArg { - // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". - // Must not match regex "__.*__". - // Must not be "". - optional string name = 1; - optional Value value = 2; - optional bytes cursor = 3; -} - -// A batch of results produced by a query. 
-message QueryResultBatch { - // The possible values for the 'more_results' field. - enum MoreResultsType { - NOT_FINISHED = 1; // There are additional batches to fetch from this query. - MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more - // results after the limit. - NO_MORE_RESULTS = 3; // The query has been exhausted. - } - - // The result type for every entity in entityResults. - required EntityResult.ResultType entity_result_type = 1; - // The results for this batch. - repeated EntityResult entity_result = 2; - - // A cursor that points to the position after the last result in the batch. - // May be absent. - optional bytes /* serialized QueryCursor */ end_cursor = 4; - - // The state of the query after the current batch. - required MoreResultsType more_results = 5; - - // The number of results skipped because of Query.offset. - optional int32 skipped_results = 6; -} - -// A set of changes to apply. -// -// No entity in this message may have a reserved property name, -// not even a property in an entity in a value. -// No value in this message may have meaning 18, -// not even a value in an entity in another value. -// -// If entities with duplicate keys are present, an arbitrary choice will -// be made as to which is written. -message Mutation { - // Entities to upsert. - // Each upserted entity's key must have a complete path and - // must not be reserved/read-only. - repeated Entity upsert = 1; - // Entities to update. - // Each updated entity's key must have a complete path and - // must not be reserved/read-only. - repeated Entity update = 2; - // Entities to insert. - // Each inserted entity's key must have a complete path and - // must not be reserved/read-only. - repeated Entity insert = 3; - // Insert entities with a newly allocated ID. - // Each inserted entity's key must omit the final identifier in its path and - // must not be reserved/read-only. - repeated Entity insert_auto_id = 4; - // Keys of entities to delete. - // Each key must have a complete key path and must not be reserved/read-only. - repeated Key delete = 5; - // Ignore a user specified read-only period. Optional. - optional bool force = 6; -} - -// The result of applying a mutation. -message MutationResult { - // Number of index writes. - required int32 index_updates = 1; - // Keys for insertAutoId entities. One per entity from the - // request, in the same order. - repeated Key insert_auto_id_key = 2; -} - -// Options shared by read requests. -message ReadOptions { - enum ReadConsistency { - DEFAULT = 0; - STRONG = 1; - EVENTUAL = 2; - } - - // The read consistency to use. - // Cannot be set when transaction is set. - // Lookup and ancestor queries default to STRONG, global queries default to - // EVENTUAL and cannot be set to STRONG. - optional ReadConsistency read_consistency = 1 [default=DEFAULT]; - - // The transaction to use. Optional. - optional bytes /* serialized Transaction */ transaction = 2; -} - -// The request for Lookup. -message LookupRequest { - - // Options for this lookup request. Optional. - optional ReadOptions read_options = 1; - // Keys of entities to look up from the datastore. - repeated Key key = 3; -} - -// The response for Lookup. -message LookupResponse { - - // The order of results in these fields is undefined and has no relation to - // the order of the keys in the input. - - // Entities found as ResultType.FULL entities. - repeated EntityResult found = 1; - - // Entities not found as ResultType.KEY_ONLY entities. 
- repeated EntityResult missing = 2; - - // A list of keys that were not looked up due to resource constraints. - repeated Key deferred = 3; -} - - -// The request for RunQuery. -message RunQueryRequest { - - // The options for this query. - optional ReadOptions read_options = 1; - - // Entities are partitioned into subsets, identified by a dataset (usually - // implicitly specified by the project) and namespace ID. Queries are scoped - // to a single partition. - // This partition ID is normalized with the standard default context - // partition ID, but all other partition IDs in RunQueryRequest are - // normalized with this partition ID as the context partition ID. - optional PartitionId partition_id = 2; - - // The query to run. - // Either this field or field gql_query must be set, but not both. - optional Query query = 3; - // The GQL query to run. - // Either this field or field query must be set, but not both. - optional GqlQuery gql_query = 7; -} - -// The response for RunQuery. -message RunQueryResponse { - - // A batch of query results (always present). - optional QueryResultBatch batch = 1; - -} - -// The request for BeginTransaction. -message BeginTransactionRequest { - - enum IsolationLevel { - SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions - // conflict if their mutations conflict. For example: - // Read(A),Write(B) may not conflict with Read(B),Write(A), - // but Read(B),Write(B) does conflict with Read(B),Write(B). - SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent - // transactions conflict if they cannot be serialized. - // For example Read(A),Write(B) does conflict with - // Read(B),Write(A) but Read(A) may not conflict with - // Write(A). - } - - // The transaction isolation level. - optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; -} - -// The response for BeginTransaction. -message BeginTransactionResponse { - - // The transaction identifier (always present). - optional bytes /* serialized Transaction */ transaction = 1; -} - -// The request for Rollback. -message RollbackRequest { - - // The transaction identifier, returned by a call to - // beginTransaction. - required bytes /* serialized Transaction */ transaction = 1; -} - -// The response for Rollback. -message RollbackResponse { -// Empty -} - -// The request for Commit. -message CommitRequest { - - enum Mode { - TRANSACTIONAL = 1; - NON_TRANSACTIONAL = 2; - } - - // The transaction identifier, returned by a call to - // beginTransaction. Must be set when mode is TRANSACTIONAL. - optional bytes /* serialized Transaction */ transaction = 1; - // The mutation to perform. Optional. - optional Mutation mutation = 2; - // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. - optional Mode mode = 5 [default=TRANSACTIONAL]; -} - -// The response for Commit. -message CommitResponse { - - // The result of performing the mutation (if any). - optional MutationResult mutation_result = 1; -} - -// The request for AllocateIds. -message AllocateIdsRequest { - - // A list of keys with incomplete key paths to allocate IDs for. - // No key may be reserved/read-only. - repeated Key key = 1; -} - -// The response for AllocateIds. -message AllocateIdsResponse { - - // The keys specified in the request (in the same order), each with - // its key path completed with a newly allocated ID. 
- repeated Key key = 1; -} - -// Each rpc normalizes the partition IDs of the keys in its input entities, -// and always returns entities with keys with normalized partition IDs. -// (Note that applies to all entities, including entities in values.) -service DatastoreService { - // Look up some entities by key. - rpc Lookup(LookupRequest) returns (LookupResponse) { - }; - // Query for entities. - rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { - }; - // Begin a new transaction. - rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { - }; - // Commit a transaction, optionally creating, deleting or modifying some - // entities. - rpc Commit(CommitRequest) returns (CommitResponse) { - }; - // Roll back a transaction. - rpc Rollback(RollbackRequest) returns (RollbackResponse) { - }; - // Allocate IDs for incomplete keys (useful for referencing an entity before - // it is inserted). - rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { - }; -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go deleted file mode 100644 index aafd68387e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package testutil contains helper functions for writing tests. -package testutil - -import ( - "io/ioutil" - "log" - "net/http" - "os" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/cloud" -) - -const ( - envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" - envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" -) - -func Context(scopes ...string) context.Context { - key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) - if key == "" || projID == "" { - log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") - } - jsonKey, err := ioutil.ReadFile(key) - if err != nil { - log.Fatalf("Cannot read the JSON key file, err: %v", err) - } - conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) - if err != nil { - log.Fatal(err) - } - return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) -} - -func NoAuthContext() context.Context { - projID := os.Getenv(envProjID) - if projID == "" { - log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. 
See CONTRIBUTING.md for details.") - } - return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml index 8e18792668..3f83776ec5 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml +++ b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml @@ -1,10 +1,14 @@ -sudo: false - language: go +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + install: - - go get -v -t -d google.golang.org/grpc/... + - mkdir -p "$GOPATH/src/google.golang.org" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" script: - - go test -v -cpu 1,4 google.golang.org/grpc/... - - go test -v -race -cpu 1,4 google.golang.org/grpc/... + - make test testrace + - make coverage diff --git a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md index b6decb6d8c..407d384a7c 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md @@ -20,8 +20,4 @@ When filing an issue, make sure to answer these five questions: 5. What did you see instead? ### Contributing code -Please read the Contribution Guidelines before sending patches. - -We will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md b/Godeps/_workspace/src/google.golang.org/grpc/Documentation/grpc-auth-support.md similarity index 57% rename from Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md rename to Godeps/_workspace/src/google.golang.org/grpc/Documentation/grpc-auth-support.md index 80565ca781..5a075f6132 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/grpc-auth-support.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/Documentation/grpc-auth-support.md @@ -1,6 +1,6 @@ # Authentication -As outlined here gRPC supports a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. +As outlined in the [gRPC authentication guide](http://www.grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. # Enabling TLS on a gRPC client @@ -15,7 +15,10 @@ creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { log.Fatalf("Failed to generate credentials %v", err) } -server.Serve(creds.NewListener(lis)) +lis, err := net.Listen("tcp", ":0") +server := grpc.NewServer(grpc.Creds(creds)) +... 
+server.Serve(lis) ``` # Authenticating with Google @@ -23,13 +26,13 @@ server.Serve(creds.NewListener(lis)) ## Google Compute Engine (GCE) ```Go -conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(credentials.NewComputeEngine()))) +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(oauth.NewComputeEngine()))) ``` ## JWT ```Go -jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) +jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { log.Fatalf("Failed to create JWT credentials: %v", err) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/Makefile b/Godeps/_workspace/src/google.golang.org/grpc/Makefile new file mode 100644 index 0000000000..5bc38be209 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/Makefile @@ -0,0 +1,50 @@ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go get -v github.com/golang/protobuf/protoc-gen-go + for file in $$(git ls-files '*.proto'); do \ + protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + done + +test: testdeps + go test -v -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -v -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean google.golang.org/grpc/... + +coverage: testdeps + goveralls -v google.golang.org/grpc/... diff --git a/Godeps/_workspace/src/google.golang.org/grpc/README.md b/Godeps/_workspace/src/google.golang.org/grpc/README.md index caa40261f9..37b05f0953 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/README.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/README.md @@ -2,22 +2,31 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](https://github.com/grpc/grpc) +The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. Installation ------------ -To install this package, you need to install Go 1.4 and setup your Go workspace on your computer. The simplest way to install the library is to run: +To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc ``` +Prerequisites +------------- + +This requires Go 1.4 or above. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. 
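The grpc-auth-support.md hunks above move the TLS examples to the current grpc-go API: server credentials are attached as a `grpc.Creds` server option instead of wrapping the listener, and clients dial with `grpc.WithTransportCredentials`. A compact sketch combining both sides is shown here; the certificate paths and `localhost:10000` address are placeholders, `credentials.NewClientTLSFromFile` is assumed to be available as a sibling of the `NewClientTLSFromCert` helper shown above, and error handling is trimmed to log-and-exit:

```Go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server side: load a cert/key pair and attach it as a server option.
	serverCreds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("failed to load server credentials: %v", err)
	}
	lis, err := net.Listen("tcp", "localhost:10000")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(serverCreds))
	// Register generated services on srv here, then serve in the background.
	go srv.Serve(lis)

	// Client side: trust the server certificate and dial with TLS.
	clientCreds, err := credentials.NewClientTLSFromFile("server.crt", "")
	if err != nil {
		log.Fatalf("failed to load client credentials: %v", err)
	}
	conn, err := grpc.Dial("localhost:10000", grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
	// Create generated client stubs from conn here.
	srv.Stop()
}
```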
If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + Documentation ------------- -You can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common). +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). Status ------ -Alpha - ready for early adopters. +Beta release diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go index d90bec7025..7215d35a51 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark.go @@ -38,31 +38,29 @@ package benchmark import ( "io" - "log" "math" "net" - "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc" - testpb "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/grpclog" ) func newPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { - log.Fatalf("Requested a response with invalid length %d", size) + grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: - log.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: - log.Fatalf("Unsupported payload type: %d", t) + grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ - Type: t.Enum(), + Type: t, Body: body, } } @@ -70,49 +68,13 @@ func newPayload(t testpb.PayloadType, size int) *testpb.Payload { type testServer struct { } -func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return new(testpb.Empty), nil -} - func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{ - Payload: newPayload(in.GetResponseType(), int(in.GetResponseSize())), + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), }, nil } -func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { - cs := args.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(args.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - return nil -} - -func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { - var sum int - for { - in, err := stream.Recv() - if err == io.EOF { - return stream.SendAndClose(&testpb.StreamingInputCallResponse{ - AggregatedPayloadSize: proto.Int32(int32(sum)), - }) - } - if err != nil { - return err - } - p := in.GetPayload().GetBody() - sum += len(p) - } -} - -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error { for { in, err := stream.Recv() if err == io.EOF { @@ -122,55 +84,21 @@ func (s *testServer) 
FullDuplexCall(stream testpb.TestService_FullDuplexCallServ if err != nil { return err } - cs := in.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(in.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - } -} - -func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { - msgBuf := make([]*testpb.StreamingOutputCallRequest, 0) - for { - in, err := stream.Recv() - if err == io.EOF { - // read done. - break - } - if err != nil { + if err := stream.Send(&testpb.SimpleResponse{ + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), + }); err != nil { return err } - msgBuf = append(msgBuf, in) } - for _, m := range msgBuf { - cs := m.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(m.GetResponseType(), int(c.GetSize())), - }); err != nil { - return err - } - } - } - return nil } -// StartServer starts a gRPC server serving a benchmark service. It returns its -// listen address and a function to stop the server. -func StartServer() (string, func()) { - lis, err := net.Listen("tcp", ":0") +// StartServer starts a gRPC server serving a benchmark service on the given +// address, which may be something like "localhost:0". It returns its listen +// address and a function to stop the server. +func StartServer(addr string) (string, func()) { + lis, err := net.Listen("tcp", addr) if err != nil { - log.Fatalf("Failed to listen: %v", err) + grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32)) testpb.RegisterTestServiceServer(s, &testServer{}) @@ -184,20 +112,36 @@ func StartServer() (string, func()) { func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), + ResponseType: pl.Type, + ResponseSize: int32(respSize), Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { - log.Fatal("/TestService/UnaryCall RPC failed: ", err) + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } +} + +// DoStreamingRoundTrip performs a round trip for a single streaming rpc. +func DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("StreamingCall(_).Send: %v", err) + } + if _, err := stream.Recv(); err != nil { + grpclog.Fatalf("StreamingCall(_).Recv: %v", err) } } // NewClientConn creates a gRPC client connection to addr. 
func NewClientConn(addr string) *grpc.ClientConn { - conn, err := grpc.Dial(addr) + conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { - log.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } return conn } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go deleted file mode 100644 index ecdfa1a782..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package benchmark - -import ( - "os" - "sync" - "testing" - "time" - - "google.golang.org/grpc/benchmark/stats" - testpb "google.golang.org/grpc/interop/grpc_testing" -) - -func run(b *testing.B, maxConcurrentCalls int, caller func(testpb.TestServiceClient)) { - s := stats.AddStats(b, 38) - b.StopTimer() - target, stopper := StartServer() - defer stopper() - conn := NewClientConn(target) - tc := testpb.NewTestServiceClient(conn) - - // Warm up connection. - for i := 0; i < 10; i++ { - caller(tc) - } - - ch := make(chan int, maxConcurrentCalls*4) - var wg sync.WaitGroup - wg.Add(maxConcurrentCalls) - - // Distribute the b.N calls over maxConcurrentCalls workers. - for i := 0; i < maxConcurrentCalls; i++ { - go func() { - for _ = range ch { - caller(tc) - } - wg.Done() - }() - } - for i := 0; i < b.N; i++ { - b.StartTimer() - start := time.Now() - ch <- i - elapsed := time.Since(start) - b.StopTimer() - s.Add(elapsed) - } - close(ch) - wg.Wait() - conn.Close() -} - -func smallCaller(client testpb.TestServiceClient) { - DoUnaryCall(client, 1, 1) -} - -func BenchmarkClientSmallc1(b *testing.B) { - run(b, 1, smallCaller) -} - -func BenchmarkClientSmallc8(b *testing.B) { - run(b, 8, smallCaller) -} - -func BenchmarkClientSmallc64(b *testing.B) { - run(b, 64, smallCaller) -} - -func BenchmarkClientSmallc512(b *testing.B) { - run(b, 512, smallCaller) -} - -func TestMain(m *testing.M) { - os.Exit(stats.RunTestMain(m)) -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go new file mode 100644 index 0000000000..f9e2a83dbd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go @@ -0,0 +1,161 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +var ( + server = flag.String("server", "", "The server address") + maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") + trace = flag.Bool("trace", true, "Whether tracing is on") + rpcType = flag.Int("rpc_type", 0, + `Configure different client rpc type. 
Valid options are: + 0 : unary call; + 1 : streaming call.`) +) + +func unaryCaller(client testpb.TestServiceClient) { + benchmark.DoUnaryCall(client, 1, 1) +} + +func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(client, stream, 1, 1) +} + +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) { + s = stats.NewStats(256) + conn = benchmark.NewClientConn(*server) + tc = testpb.NewTestServiceClient(conn) + return s, conn, tc +} + +func closeLoopUnary() { + s, conn, tc := buildConnection() + + for i := 0; i < 100; i++ { + unaryCaller(tc) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) + +} + +func closeLoopStream() { + s, conn, tc := buildConnection() + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 100; i++ { + streamCaller(tc, stream) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + // Distribute RPCs over maxConcurrentCalls workers. + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + streamCaller(tc, stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) +} + +func main() { + flag.Parse() + grpc.EnableTracing = *trace + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Client profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + switch *rpcType { + case 0: + closeLoopUnary() + case 1: + closeLoopStream() + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go index 261ba1cc27..74e13c9e95 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.pb.go @@ -1,28 +1,32 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. 
It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: - Empty + StatsRequest + ServerStats Payload + HistogramData + ClientConfig + Mark + ClientArgs + ClientStats + ClientStatus + ServerConfig + ServerArgs + ServerStatus SimpleRequest SimpleResponse - StreamingInputCallRequest - StreamingInputCallResponse - ResponseParameters - StreamingOutputCallRequest - StreamingOutputCallResponse */ package grpc_testing import proto "github.com/golang/protobuf/proto" -import math "math" import ( context "golang.org/x/net/context" @@ -35,9 +39,7 @@ var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal -var _ = math.Inf -// The type of payload that should be returned. type PayloadType int32 const ( @@ -60,93 +62,260 @@ var PayloadType_value = map[string]int32{ "RANDOM": 2, } -func (x PayloadType) Enum() *PayloadType { - p := new(PayloadType) - *p = x - return p -} func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } -func (x *PayloadType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") - if err != nil { - return err - } - *x = PayloadType(value) - return nil + +type ClientType int32 + +const ( + ClientType_SYNCHRONOUS_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNCHRONOUS_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNCHRONOUS_CLIENT": 0, + "ASYNC_CLIENT": 1, } -type Empty struct { - XXX_unrecognized []byte `json:"-"` +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} +type ServerType int32 + +const ( + ServerType_SYNCHRONOUS_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 +) + +var ServerType_name = map[int32]string{ + 0: "SYNCHRONOUS_SERVER", + 1: "ASYNC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNCHRONOUS_SERVER": 0, + "ASYNC_SERVER": 1, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} + +type StatsRequest struct { + // run number + TestNum int32 `protobuf:"varint,1,opt,name=test_num" json:"test_num,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} + +type ServerStats struct { + // wall clock time + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + // user time used by the server process and threads + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user" json:"time_user,omitempty"` + // server time used by the server process and all threads + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func 
(*ServerStats) ProtoMessage() {} -// A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. - Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. - Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` - XXX_unrecognized []byte `json:"-"` + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} -func (m *Payload) GetType() PayloadType { - if m != nil && m.Type != nil { - return *m.Type - } - return PayloadType_COMPRESSABLE +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` } -func (m *Payload) GetBody() []byte { +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} + +type ClientConfig struct { + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel" json:"outstanding_rpcs_per_channel,omitempty"` + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels" json:"client_channels,omitempty"` + PayloadSize int32 `protobuf:"varint,6,opt,name=payload_size" json:"payload_size,omitempty"` + // only for async client: + AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} + +// Request current stats +type Mark struct { +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} + +type ClientArgs struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} + +func (m *ClientArgs) GetSetup() *ClientConfig { if m != nil { - return m.Body + return m.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ClientStats struct { + Latencies *HistogramData 
`protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + TimeElapsed float64 `protobuf:"fixed64,3,opt,name=time_elapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,4,opt,name=time_user" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,5,opt,name=time_system" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +type ClientStatus struct { + Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } +func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + Threads int32 `protobuf:"varint,2,opt,name=threads" json:"threads,omitempty"` + EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} + +type ServerArgs struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup" json:"setup,omitempty"` + Mark *Mark `protobuf:"bytes,2,opt,name=mark" json:"mark,omitempty"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if m != nil { + return m.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if m != nil { + return m.Mark + } + return nil +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats } return nil } -// Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - // Whether SimpleResponse should include username. 
- FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. - FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} -func (m *SimpleRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *SimpleRequest) GetResponseSize() int32 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload @@ -154,30 +323,8 @@ func (m *SimpleRequest) GetPayload() *Payload { return nil } -func (m *SimpleRequest) GetFillUsername() bool { - if m != nil && m.FillUsername != nil { - return *m.FillUsername - } - return false -} - -func (m *SimpleRequest) GetFillOauthScope() bool { - if m != nil && m.FillOauthScope != nil { - return *m.FillOauthScope - } - return false -} - -// Unary response, as configured by the request. type SimpleResponse struct { - // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - // OAuth scope. - OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } @@ -191,169 +338,22 @@ func (m *SimpleResponse) GetPayload() *Payload { return nil } -func (m *SimpleResponse) GetUsername() string { - if m != nil && m.Username != nil { - return *m.Username - } - return "" -} - -func (m *SimpleResponse) GetOauthScope() string { - if m != nil && m.OauthScope != nil { - return *m.OauthScope - } - return "" -} - -// Client-streaming request. -type StreamingInputCallRequest struct { - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } -func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallRequest) ProtoMessage() {} - -func (m *StreamingInputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Client-streaming response. -type StreamingInputCallResponse struct { - // Aggregated size of payloads received from the client. - AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } -func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallResponse) ProtoMessage() {} - -func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if m != nil && m.AggregatedPayloadSize != nil { - return *m.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. 
-type ResponseParameters struct { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. - IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } -func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } -func (*ResponseParameters) ProtoMessage() {} - -func (m *ResponseParameters) GetSize() int32 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *ResponseParameters) GetIntervalUs() int32 { - if m != nil && m.IntervalUs != nil { - return *m.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. - ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } -func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallRequest) ProtoMessage() {} - -func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if m != nil { - return m.ResponseParameters - } - return nil -} - -func (m *StreamingOutputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - // Payload to increase response size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } -func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallResponse) ProtoMessage() {} - -func (m *StreamingOutputCallResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) } // Client API for TestService service type TestServiceClient interface { - // One empty request followed by one empty response. 
- EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) + // One request followed by one response. + // The server returns the client payload as-is. + StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) } type testServiceClient struct { @@ -364,15 +364,6 @@ func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { return &testServiceClient{cc} } -func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) @@ -382,128 +373,31 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op return out, nil } -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) +func (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingCall", opts...) 
if err != nil { return nil, err } - x := &testServiceStreamingOutputCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } + x := &testServiceStreamingCallClient{stream} return x, nil } -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) +type TestService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) grpc.ClientStream } -type testServiceStreamingOutputCallClient struct { +type testServiceStreamingCallClient struct { grpc.ClientStream } -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { +func (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) +func (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -513,47 +407,21 @@ func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, // Server API for TestService service type TestServiceServer interface { - // One empty request followed by one empty response. - EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(TestService_StreamingCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { - in := new(Empty) - if err := codec.Unmarshal(buf, in); err != nil { - return nil, err - } - out, err := srv.(TestServiceServer).EmptyCall(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(SimpleRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) @@ -563,99 +431,26 @@ func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec return out, nil } -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +func _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream}) } -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error +type TestService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) grpc.ServerStream } -type testServiceStreamingOutputCallServer struct { +type testServiceStreamingCallServer struct { grpc.ServerStream } -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { +func (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() 
(*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) +func (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } @@ -666,10 +461,6 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, @@ -677,24 +468,172 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, + StreamName: "StreamingCall", + Handler: _TestService_StreamingCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} + +// Client API for Worker service + +type WorkerClient interface { + // Start test with specified workload + RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) + // Start test with specified workload + RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) +} + +type workerClient struct { + cc *grpc.ClientConn +} + +func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { + return &workerClient{cc} +} + +func (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[0], c.cc, "/grpc.testing.Worker/RunTest", opts...) 
+ if err != nil { + return nil, err + } + x := &workerRunTestClient{stream} + return x, nil +} + +type Worker_RunTestClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerRunTestClient struct { + grpc.ClientStream +} + +func (x *workerRunTestClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunTestClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, "/grpc.testing.Worker/RunServer", opts...) + if err != nil { + return nil, err + } + x := &workerRunServerClient{stream} + return x, nil +} + +type Worker_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerRunServerClient struct { + grpc.ClientStream +} + +func (x *workerRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Worker service + +type WorkerServer interface { + // Start test with specified workload + RunTest(Worker_RunTestServer) error + // Start test with specified workload + RunServer(Worker_RunServerServer) error +} + +func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { + s.RegisterService(&_Worker_serviceDesc, srv) +} + +func _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunTest(&workerRunTestServer{stream}) +} + +type Worker_RunTestServer interface { + Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerRunTestServer struct { + grpc.ServerStream +} + +func (x *workerRunTestServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunTestServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServer).RunServer(&workerRunServerServer{stream}) +} + +type Worker_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerRunServerServer struct { + grpc.ServerStream +} + +func (x *workerRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Worker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.Worker", + HandlerType: (*WorkerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunTest", + Handler: _Worker_RunTest_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunServer", + Handler: _Worker_RunServer_Handler, ServerStreams: true, ClientStreams: true, }, diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto index 
b5bfe05378..e3a27f861b 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/grpc_testing/test.proto @@ -1,140 +1,148 @@ // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. -syntax = "proto2"; +syntax = "proto3"; package grpc.testing; -message Empty {} - -// The type of payload that should be returned. enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; + // Compressable text format. + COMPRESSABLE = 0; - // Uncompressable binary format. - UNCOMPRESSABLE = 1; + // Uncompressable binary format. + UNCOMPRESSABLE = 1; - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +message StatsRequest { + // run number + optional int32 test_num = 1; +} + +message ServerStats { + // wall clock time + double time_elapsed = 1; + + // user time used by the server process and threads + double time_user = 2; + + // server time used by the server process and all threads + double time_system = 3; } -// A block of data, to simply increase gRPC message size. message Payload { - // The type of data in body. - optional PayloadType type = 1; - // Primary contents of payload. - optional bytes body = 2; + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. + bytes body = 2; +} + +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +enum ClientType { + SYNCHRONOUS_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNCHRONOUS_SERVER = 0; + ASYNC_SERVER = 1; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +message ClientConfig { + repeated string server_targets = 1; + ClientType client_type = 2; + bool enable_ssl = 3; + int32 outstanding_rpcs_per_channel = 4; + int32 client_channels = 5; + int32 payload_size = 6; + // only for async client: + int32 async_client_threads = 7; + RpcType rpc_type = 8; +} + +// Request current stats +message Mark {} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ClientStats { + HistogramData latencies = 1; + double time_elapsed = 3; + double time_user = 4; + double time_system = 5; +} + +message ClientStatus { + ClientStats stats = 1; +} + +message ServerConfig { + ServerType server_type = 1; + int32 threads = 2; + bool enable_ssl = 3; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + int32 port = 2; } -// Unary request. message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1; + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + PayloadType response_type = 1; - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2; + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. 
+ int32 response_size = 2; - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether SimpleResponse should include username. - optional bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - optional bool fill_oauth_scope = 5; + // Optional input payload sent along with the request. + Payload payload = 3; } -// Unary response, as configured by the request. message SimpleResponse { - // Payload to increase message size. - optional Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - optional string username = 2; - - // OAuth scope. - optional string oauth_scope = 3; + Payload payload = 1; } -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - optional Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - optional int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - optional int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - optional PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - optional Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. 
- rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); + // One request followed by one response. + // The server returns the client payload as-is. + rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service Worker { + // Start test with specified workload + rpc RunTest(stream ClientArgs) returns (stream ClientStatus); + // Start test with specified workload + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go new file mode 100644 index 0000000000..090f002fa1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "time" + + "google.golang.org/grpc/benchmark" + "google.golang.org/grpc/grpclog" +) + +var ( + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") +) + +func main() { + flag.Parse() + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Server profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + addr, stopper := benchmark.StartServer(":0") // listen on all interfaces + grpclog.Println("Server Address: ", addr) + <-time.After(time.Duration(*duration) * time.Second) + stopper() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go index 326b88d9a1..4389bae38c 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/counter.go @@ -6,8 +6,8 @@ import ( ) var ( - // Used for testing. - TimeNow func() time.Time = time.Now + // TimeNow is used for testing. 
+ TimeNow = time.Now ) const ( diff --git a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go index 3038118203..727808c8ba 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/benchmark/stats/histogram.go @@ -251,5 +251,5 @@ func (h *Histogram) findBucket(value int64) (int, error) { } min = b + 1 } - return 0, fmt.Errorf("no bucket for value: %f", value) + return 0, fmt.Errorf("no bucket for value: %d", value) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/call.go b/Godeps/_workspace/src/google.golang.org/grpc/call.go index e15e4f9d5b..b5f929291a 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/call.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/call.go @@ -35,8 +35,10 @@ package grpc import ( "io" + "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" @@ -97,11 +99,12 @@ type callInfo struct { failFast bool headerMD metadata.MD trailerMD metadata.MD + traceInfo traceInfo // in trace.go } // Invoke is called by the generated code. It sends the RPC request on the // wire and returns after response is received. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { var c callInfo for _, o := range opts { if err := o.before(&c); err != nil { @@ -113,16 +116,27 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli o.after(&c) } }() - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+transport.MethodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. + defer func() { + if err != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + c.traceInfo.tr.SetError() + } + }() } topts := &transport.Options{ Last: true, Delay: false, } var ( - ts int // track the transport sequence number lastErr error // record the error that happened ) for { @@ -130,20 +144,35 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream + conn *Conn ) // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. if lastErr != nil && c.failFast { return toRPCErr(lastErr) } - t, ts, err = cc.wait(ctx, ts) + conn, err = cc.dopts.picker.Pick() if err != nil { + return toRPCErr(err) + } + callHdr := &transport.CallHdr{ + Host: conn.authority, + Method: method, + } + t, err = conn.Wait(ctx) + if err != nil { + if err == ErrTransientFailure { + continue + } if lastErr != nil { // This was a retry; return the error from the last attempt. 
return toRPCErr(lastErr) } return toRPCErr(err) } - stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = sendRequest(ctx, conn.dopts.codec, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { lastErr = err @@ -155,10 +184,13 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli return toRPCErr(err) } // Receive the response - lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) + lastErr = recvResponse(conn.dopts.codec, t, &c, stream, reply) if _, ok := lastErr.(transport.ConnectionError); ok { continue } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } t.CloseStream(stream, lastErr) if lastErr != nil { return toRPCErr(lastErr) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go index e03cabdfe5..f5b28d894a 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go @@ -35,33 +35,50 @@ package grpc import ( "errors" - "log" + "fmt" "net" "strings" "sync" "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/transport" ) var ( // ErrUnspecTarget indicates that the target address is unspecified. ErrUnspecTarget = errors.New("grpc: target is unspecified") + // ErrNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicityly + // call WithInsecure DialOption to disable security. + ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // ErrCredentialsMisuse indicates that users want to transmit security infomation + // (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because // the session is closing. ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the connection could not be // established or re-established within the specified timeout. ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // ErrTransientFailure indicates the connection failed due to a transient error. + ErrTransientFailure = errors.New("grpc: transient connection failure") + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second ) // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - codec Codec - copts transport.ConnectOptions + codec Codec + picker Picker + block bool + insecure bool + copts transport.ConnectOptions } // DialOption configures how we set up the connection. @@ -74,6 +91,21 @@ func WithCodec(c Codec) DialOption { } } +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. 
+func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { @@ -104,73 +136,239 @@ func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) Di } } -// Dial creates a client connection the given target. -// TODO(zhaoq): Have an option to make Dial return immediately without waiting -// for connection to complete. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - if target == "" { - return nil, ErrUnspecTarget +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s } +} + +// Dial creates a client connection the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { cc := &ClientConn{ target: target, } for _, opt := range opts { opt(&cc.dopts) } - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) + if cc.dopts.picker == nil { + cc.dopts.picker = &unicastPicker{} } - cc.authority = target[:colonPos] - if cc.dopts.codec == nil { - // Set the default codec. - cc.dopts.codec = protoCodec{} - } - if err := cc.resetTransport(false); err != nil { + if err := cc.dopts.picker.Init(cc); err != nil { return nil, err } - cc.shutdownChan = make(chan struct{}) - // Start to monitor the error status of transport. - go cc.transportMonitor() return cc, nil } +// ConnectivityState indicates the state of a client connection. +type ConnectivityState int + +const ( + // Idle indicates the ClientConn is idle. + Idle ConnectivityState = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +func (s ConnectivityState) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + panic(fmt.Sprintf("unknown connectivity state: %d", s)) + } +} + // ClientConn represents a client connection to an RPC service. type ClientConn struct { + target string + dopts dialOptions +} + +// State returns the connectivity state of cc. +// This is EXPERIMENTAL API. +func (cc *ClientConn) State() ConnectivityState { + return cc.dopts.picker.State() +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState +// or timeout fires on cc. It returns false if timeout fires, and true otherwise. +// This is EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { + return cc.dopts.picker.WaitForStateChange(timeout, sourceState) +} + +// Close starts to tear down the ClientConn. +func (cc *ClientConn) Close() error { + return cc.dopts.picker.Close() +} + +// Conn is a client connection to a single destination. 
+type Conn struct { target string authority string dopts dialOptions shutdownChan chan struct{} + events trace.EventLog - mu sync.Mutex + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond // ready is closed and becomes nil when a new transport is up or failed // due to timeout. - ready chan struct{} - // Indicates the ClientConn is under destruction. - closing bool - // Every time a new transport is created, this is incremented by 1. Used - // to avoid trying to recreate a transport while the new one is already - // under construction. - transportSeq int - transport transport.ClientTransport + ready chan struct{} + transport transport.ClientTransport } -func (cc *ClientConn) resetTransport(closeTransport bool) error { +// NewConn creates a Conn. +func NewConn(cc *ClientConn) (*Conn, error) { + if cc.target == "" { + return nil, ErrUnspecTarget + } + c := &Conn{ + target: cc.target, + dopts: cc.dopts, + shutdownChan: make(chan struct{}), + } + if EnableTracing { + c.events = trace.NewEventLog("grpc.ClientConn", c.target) + } + if !c.dopts.insecure { + var ok bool + for _, cd := range c.dopts.copts.AuthOptions { + if _, ok := cd.(credentials.TransportAuthenticator); !ok { + continue + } + ok = true + } + if !ok { + return nil, ErrNoTransportSecurity + } + } else { + for _, cd := range c.dopts.copts.AuthOptions { + if cd.RequireTransportSecurity() { + return nil, ErrCredentialsMisuse + } + } + } + colonPos := strings.LastIndex(c.target, ":") + if colonPos == -1 { + colonPos = len(c.target) + } + c.authority = c.target[:colonPos] + if c.dopts.codec == nil { + // Set the default codec. + c.dopts.codec = protoCodec{} + } + c.stateCV = sync.NewCond(&c.mu) + if c.dopts.block { + if err := c.resetTransport(false); err != nil { + c.Close() + return nil, err + } + // Start to monitor the error status of transport. + go c.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := c.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) + c.Close() + return + } + c.transportMonitor() + }() + } + return c, nil +} + +// printf records an event in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) printf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Printf(format, a...) + } +} + +// errorf records an error in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) errorf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Errorf(format, a...) + } +} + +// State returns the connectivity state of the Conn +func (cc *Conn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState +// or timeout fires. It returns false if timeout fires and true otherwise. +// TODO(zhaoq): Rewrite for complex Picker. 
+func (cc *Conn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { + start := time.Now() + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return true + } + expired := timeout <= time.Since(start) + if expired { + return false + } + done := make(chan struct{}) + go func() { + select { + case <-time.After(timeout - time.Since(start)): + cc.mu.Lock() + expired = true + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if expired { + return false + } + } + return true +} + +func (cc *Conn) resetTransport(closeTransport bool) error { var retries int start := time.Now() for { cc.mu.Lock() - t := cc.transport - ts := cc.transportSeq - // Avoid wait() picking up a dying transport unnecessarily. - cc.transportSeq = 0 - if cc.closing { + cc.printf("connecting") + if cc.state == Shutdown { cc.mu.Unlock() return ErrClientConnClosing } + cc.state = Connecting + cc.stateCV.Broadcast() cc.mu.Unlock() if closeTransport { - t.Close() + cc.transport.Close() } // Adjust timeout for the current try. copts := cc.dopts.copts @@ -185,30 +383,55 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { return ErrClientConnTimeout } } + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() newTransport, err := transport.NewClientTransport(cc.target, &copts) if err != nil { - sleepTime := backoff(retries) + cc.mu.Lock() + cc.errorf("transient failure: %v", err) + cc.state = TransientFailure + cc.stateCV.Broadcast() + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } // Fail early before falling into sleep. if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { + cc.mu.Lock() + cc.errorf("connection timeout") + cc.mu.Unlock() cc.Close() return ErrClientConnTimeout } closeTransport = false time.Sleep(sleepTime) retries++ - // TODO(zhaoq): Record the error with glog.V. - log.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) continue } cc.mu.Lock() - if cc.closing { + cc.printf("ready") + if cc.state == Shutdown { // cc.Close() has been invoked. cc.mu.Unlock() newTransport.Close() return ErrClientConnClosing } + cc.state = Ready + cc.stateCV.Broadcast() cc.transport = newTransport - cc.transportSeq = ts + 1 if cc.ready != nil { close(cc.ready) cc.ready = nil @@ -220,18 +443,24 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. -func (cc *ClientConn) transportMonitor() { +func (cc *Conn) transportMonitor() { for { select { - // shutdownChan is needed to detect the channel teardown when + // shutdownChan is needed to detect the teardown when // the ClientConn is idle (i.e., no RPC in flight). 
case <-cc.shutdownChan: return case <-cc.transport.Error(): + cc.mu.Lock() + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() if err := cc.resetTransport(true); err != nil { - // The channel is closing. - // TODO(zhaoq): Record the error with glog.V. - log.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) + // The ClientConn is closing. + cc.mu.Lock() + cc.printf("transport exiting: %v", err) + cc.mu.Unlock() + grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) return } continue @@ -239,20 +468,22 @@ func (cc *ClientConn) transportMonitor() { } } -// When wait returns, either the new transport is up or ClientConn is -// closing. Used to avoid working on a dying transport. It updates and -// returns the transport and its version when there is no error. -func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) { +// Wait blocks until i) the new transport is up or ii) ctx is done or iii) +func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { for { cc.mu.Lock() switch { - case cc.closing: + case cc.state == Shutdown: cc.mu.Unlock() - return nil, 0, ErrClientConnClosing - case ts < cc.transportSeq: - // Worked on a dying transport. Try the new one immediately. - defer cc.mu.Unlock() - return cc.transport, cc.transportSeq, nil + return nil, ErrClientConnClosing + case cc.state == Ready: + cc.mu.Unlock() + return cc.transport, nil + case cc.state == TransientFailure: + cc.mu.Unlock() + // Break out so that the caller gets chance to pick another transport to + // perform rpc instead of sticking to this transport. + return nil, ErrTransientFailure default: ready := cc.ready if ready == nil { @@ -262,7 +493,7 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo cc.mu.Unlock() select { case <-ctx.Done(): - return nil, 0, transport.ContextErr(ctx.Err()) + return nil, transport.ContextErr(ctx.Err()) // Wait until the new transport is ready or failed. case <-ready: } @@ -270,18 +501,23 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo } } -// Close starts to tear down the ClientConn. Returns ErrClientConnClosing if +// Close starts to tear down the Conn. Returns ErrClientConnClosing if // it has been closed (mostly due to dial time-out). // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many ClientConn's in a // tight loop. -func (cc *ClientConn) Close() error { +func (cc *Conn) Close() error { cc.mu.Lock() defer cc.mu.Unlock() - if cc.closing { + if cc.state == Shutdown { return ErrClientConnClosing } - cc.closing = true + cc.state = Shutdown + cc.stateCV.Broadcast() + if cc.events != nil { + cc.events.Finish() + cc.events = nil + } if cc.ready != nil { close(cc.ready) cc.ready = nil diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh index 4e7a9e75d0..b009488842 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh +++ b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh @@ -8,7 +8,7 @@ # plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have # not, please install them first. # -# We recommend running this script at $GOPATH or $GOPATH/src. +# We recommend running this script at $GOPATH/src. # # If this is not what you need, feel free to make your own scripts. Again, this # script is for demonstration purpose. 
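The `clientconn.go` changes above replace the old transport-sequence bookkeeping with a `Picker`-managed `Conn` per destination, an observable `ConnectivityState`, and an explicit transport-security decision at dial time. The following is a minimal, non-authoritative sketch of how a caller might exercise that updated surface, assuming a plaintext gRPC server at a placeholder address (`localhost:50051`); it is illustrative only and not part of this patch.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// WithInsecure() is now required when no TransportAuthenticator is
	// configured; otherwise NewConn fails with ErrNoTransportSecurity.
	// WithBlock() makes Dial wait for the first transport instead of
	// connecting in the background.
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()

	// EXPERIMENTAL connectivity API: the state machine moves through
	// Idle -> Connecting -> Ready, and drops to TransientFailure when
	// transportMonitor observes the transport fail.
	log.Printf("state after dial: %v", conn.State())

	// Returns false if the state is still Ready when the timeout fires.
	if !conn.WaitForStateChange(time.Second, grpc.Ready) {
		log.Println("connection still READY after 1s")
	}
}
```

This ties in with the `call.go` changes above: `Invoke` now obtains a `Conn` from the picker via `Pick()` and calls `Conn.Wait(ctx)` before each attempt, moving on to the next attempt when `Wait` reports `ErrTransientFailure`.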
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go index 74f9c39f22..d94fa2fbd8 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go @@ -47,14 +47,11 @@ import ( "time" "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" ) var ( // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2-14", "h2-15", "h2-16"} + alpnProtoStr = []string{"h2"} ) // Credentials defines the common interface all supported credentials must @@ -63,44 +60,102 @@ type Credentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other - // context. When supported by the underlying implementation, ctx can - // be used for timeout and cancellation. + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. - GetRequestMetadata(ctx context.Context) (map[string]string, error) + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentails requires + // transport security. + RequireTransportSecurity() bool } -// TransportAuthenticator defines the common interface all supported transport -// authentication protocols (e.g., TLS, SSL) must implement. +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +type authInfoKey struct{} + +// NewContext creates a new context with authInfo attached. +func NewContext(ctx context.Context, authInfo AuthInfo) context.Context { + return context.WithValue(ctx, authInfoKey{}, authInfo) +} + +// FromContext returns the authInfo in ctx if it exists. +func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) { + authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo) + return +} + +// TransportAuthenticator defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). type TransportAuthenticator interface { - // Handshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn. - Handshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error) - // NewListener creates a listener which accepts connections with requested - // authentication handshake. - NewListener(lis net.Listener) net.Listener + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. 
It returns the authenticated + // connection and the corresponding auth information about the connection. + ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportAuthenticator. + Info() ProtocolInfo Credentials } +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +func (t TLSInfo) AuthType() string { + return "tls" +} + // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration config tls.Config } +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + } +} + // GetRequestMetadata returns nil, nil since TLS credentials does not have // metadata. -func (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) { +func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { return nil, nil } +func (c *tlsCreds) RequireTransportSecurity() bool { + return true +} + type timeoutError struct{} func (timeoutError) Error() string { return "credentials: Dial timed out" } func (timeoutError) Timeout() bool { return true } func (timeoutError) Temporary() bool { return true } -func (c *tlsCreds) Handshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) { +func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { // borrow some code from tls.DialWithDialer var errChannel chan error if timeout != 0 { @@ -127,14 +182,20 @@ func (c *tlsCreds) Handshake(addr string, rawConn net.Conn, timeout time.Duratio } if err != nil { rawConn.Close() - return nil, err + return nil, nil, err } - return conn, nil + // TODO(zhaoq): Omit the auth info for client now. It is more for + // information than anything else. + return conn, nil, nil } -// NewListener creates a net.Listener using the information in tlsCreds. -func (c *tlsCreds) NewListener(lis net.Listener) net.Listener { - return tls.NewListener(lis, &c.config) +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, &c.config) + if err := conn.Handshake(); err != nil { + rawConn.Close() + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil } // NewTLS uses c to construct a TransportAuthenticator based on TLS. @@ -176,71 +237,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, err } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } - -// TokenSource supplies credentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -func (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewComputeEngine constructs the credentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. 
It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() Credentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents credentials via JWT signing key. -type serviceAccount struct { - config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewServiceAccountFromKey constructs the credentials using the JSON key slice -// from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the credentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) { - t, err := google.DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return TokenSource{t}, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 0000000000..04943fdf03 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,177 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "fmt" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies credentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies credentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the credentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.Credentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.TokenType + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the credentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.Credentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents credentials via JWT signing key. 
+type serviceAccount struct { + config *jwt.Config +} + +func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (s serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the credentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the credentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md new file mode 100644 index 0000000000..aea0f02068 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/README.md @@ -0,0 +1,57 @@ +gRPC in 3 minutes (Go) +====================== + +BACKGROUND +------------- +For this sample, we've already generated the server and client stubs from [helloworld.proto](helloworld/helloworld/helloworld.proto). + +PREREQUISITES +------------- + +- This requires Go 1.4 +- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH) + +``` +$ go help gopath +$ # ensure the PATH contains $GOPATH/bin +$ export PATH=$PATH:$GOPATH/bin +``` + +INSTALL +------- + +``` +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_client +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_server +``` + +TRY IT! +------- + +- Run the server + +``` +$ greeter_server & +``` + +- Run the client + +``` +$ greeter_client +``` + +OPTIONAL - Rebuilding the generated code +---------------------------------------- + +1 First [install protoc](https://github.com/google/protobuf/blob/master/INSTALL.txt) + - For now, this needs to be installed from source + - This is will change once proto3 is officially released + +2 Install the protoc Go plugin. 
+ +``` +$ go get -a github.com/golang/protobuf/protoc-gen-go +$ +$ # from this dir; invoke protoc +$ protoc -I ./helloworld/helloworld/ ./helloworld/helloworld/helloworld.proto --go_out=plugins=grpc:helloworld +``` diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md new file mode 100644 index 0000000000..39833fdf3d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md @@ -0,0 +1,431 @@ +#gRPC Basics: Go + +This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to: + +- Define a service in a .proto file. +- Generate server and client code using the protocol buffer compiler. +- Use the Go gRPC API to write a simple client and server for your service. + +It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release:you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. + +This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. + +## Why use gRPC? + +Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. + +With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. + +## Example code and setup + +The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command: +```shell +$ go get google.golang.org/grpc +``` + +Then change your current directory to `grpc-go/examples/route_guide`: +```shell +$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide +``` + +You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](examples/). + + +## Defining the service + +Our first step (as you'll know from the [quick start](http://www.grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/route_guide/proto/route_guide.proto`](examples/route_guide/proto/route_guide.proto). + +To define a service, you specify a named `service` in your .proto file: + +```proto +service RouteGuide { + ... 
+} +``` + +Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service: + +- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call. +```proto + // Obtains the feature at a given position. + rpc GetFeature(Point) returns (Feature) {} +``` + +- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type. +```proto + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} +``` + +- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type. +```proto + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} +``` + +- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response. +```proto + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +``` + +Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +```proto +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} +``` + + +## Generating client and server code + +Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. 
+ +For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): + +```shell +$ codegen.sh route_guide.proto +``` + +which actually runs: + +```shell +$ protoc --go_out=plugins=grpc:. route_guide.proto +``` + +Running this command generates the following file in your current directory: +- `route_guide.pb.go` + +This contains: +- All the protocol buffer code to populate, serialize, and retrieve our request and response message types +- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. +- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. + + + +## Creating the server + +First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). + +There are two parts to making our `RouteGuide` service do its job: +- Implementing the service interface generated from our service definition: doing the actual "work" of our service. +- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. + +You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. + +### Implementing RouteGuide + +As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: + +```go +type routeGuideServer struct { + ... +} +... + +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + ... +} +... + +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + ... +} +... + +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + ... +} +... + +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + ... +} +... +``` + +#### Simple RPC +`routeGuideServer` implements all our service methods. Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. + +```go +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{"", point}, nil +} +``` + +The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with an `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client. + +#### Server-side streaming RPC +Now let's look at one of our streaming RPCs. 
`ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client. + +```go +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} +``` + +As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses. + +In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire. + +#### Client-side streaming RPC +Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method. + +```go +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} +``` + +In the method body we use the `RouteGuide_RecordRouteServer`s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer. + +#### Bidirectional streaming RPC +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. + +```go +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + ... 
// look for notes to be sent to client + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} +``` + +This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream. + +The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +### Starting the server + +Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service: + +```go +flag.Parse() +lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +if err != nil { + log.Fatalf("failed to listen: %v", err) +} +grpcServer := grpc.NewServer() +pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{}) +... // determine whether to use TLS +grpcServer.Serve(lis) +``` +To build and start a server, we: + +1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`. +2. Create an instance of the gRPC server using `grpc.NewServer()`. +3. Register our service implementation with the gRPC server. +4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called. + + +## Creating the client + +In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go). + +### Creating a stub + +To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows: + +```go +conn, err := grpc.Dial(*serverAddr) +if err != nil { + ... +} +defer conn.Close() +``` + +You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service. + +Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto. + +```go +client := pb.NewRouteGuideClient(conn) +``` + +### Calling service methods + +Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error. + +#### Simple RPC + +Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method. + +```go +feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906}) +if err != nil { + ... +} +``` + +As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). 
We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as time-out/cancel an RPC in flight. If the call doesn't return an error, then we can read the response information from the server from the first return value. + +```go +log.Println(feature) +``` + +#### Server-side streaming RPC + +Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides. + +```go +rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle +stream, err := client.ListFeatures(context.Background(), rect) +if err != nil { + ... +} +for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + log.Println(feature) +} +``` + +As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses. + +We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`. + +#### Client-side streaming RPC + +The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages. + +```go +// Create a random number of random points +r := rand.New(rand.NewSource(time.Now().UnixNano())) +pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points +var points []*pb.Point +for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) +} +log.Printf("Traversing %d points.", len(points)) +stream, err := client.RecordRoute(context.Background()) +if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) +} +for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } +} +reply, err := stream.CloseAndRecv() +if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) +} +log.Printf("Route summary: %v", reply) +``` + +The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. + +#### Bidirectional streaming RPC + +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. 
However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. + +```go +stream, err := client.RouteChat(context.Background()) +waitc := make(chan struct{}) +go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } +}() +for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } +} +stream.CloseSend() +<-waitc +``` + +The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +## Try it out! + +To compile and run the server, assuming you are in the folder +`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go similarity index 59% rename from Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go rename to Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go index ef3a1b8c40..1e02ab154d 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata_test.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -1,6 +1,6 @@ /* * - * Copyright 2014, Google Inc. + * Copyright 2015, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,52 +31,39 @@ * */ -package metadata +package main import ( - "reflect" - "testing" + "log" + "os" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" ) -const binaryValue = string(128) +const ( + address = "localhost:50051" + defaultName = "world" +) -func TestDecodeKeyValue(t *testing.T) { - for _, test := range []struct { - // input - kin string - vin string - // output - kout string - vout string - err error - }{ - {"a", "abc", "a", "abc", nil}, - {"key-bin", "Zm9vAGJhcg==", "key", "foo\x00bar", nil}, - {"key-bin", "woA=", "key", binaryValue, nil}, - } { - k, v, err := DecodeKeyValue(test.kin, test.vin) - if k != test.kout || !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) { - t.Fatalf("DecodeKeyValue(%q, %q) = %q, %q, %v, want %q, %q, %v", test.kin, test.vin, k, v, err, test.kout, test.vout, test.err) - } +func main() { + // Set up a connection to the server. + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + log.Fatalf("did not connect: %v", err) } -} + defer conn.Close() + c := pb.NewGreeterClient(conn) -func TestPairsMD(t *testing.T) { - for _, test := range []struct { - // input - kv []string - // output - md MD - }{ - {[]string{}, MD{}}, - {[]string{"k1", "v1", "k2", binaryValue}, New(map[string]string{ - "k1": "v1", - "k2-bin": "woA=", - })}, - } { - md := Pairs(test.kv...) 
- if !reflect.DeepEqual(md, test.md) { - t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) - } + // Contact the server and print out its response. + name := defaultName + if len(os.Args) > 1 { + name = os.Args[1] } + r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.Message) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go new file mode 100644 index 0000000000..ba985df4c2 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "net" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const ( + port = ":50051" +) + +// server is used to implement hellowrld.GreeterServer. +type server struct{} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return &pb.HelloReply{Message: "Hello " + in.Name}, nil +} + +func main() { + lis, err := net.Listen("tcp", port) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + s.Serve(lis) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go new file mode 100644 index 0000000000..366b23b6dd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go. +// source: helloworld.proto +// DO NOT EDIT! 
+ +/* +Package helloworld is a generated protocol buffer package. + +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +// The request message containing the user's name. +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} + +func init() { +} + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + out, err := srv.(GreeterServer).SayHello(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto similarity index 64% rename from Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go rename to Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto index a2729c39a1..7d58870a70 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -1,7 +1,5 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf +// Copyright 2015, Google Inc. 
+// All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,35 +27,25 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -package proto +syntax = "proto3"; -import ( - "testing" -) +option java_package = "io.grpc.examples"; +option objc_class_prefix = "HLW"; -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. +package helloworld; -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md index 02a43f1976..a7c0c2b0e5 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/README.md @@ -2,7 +2,7 @@ The route guide server and client demonstrate how to use grpc go libraries to perform unary, client streaming, server streaming and full duplex RPCs. -Please refer to [Getting Started Guide for Go] (https://github.com/grpc/grpc-common/blob/master/go/gotutorial.md) for more information. +Please refer to [gRPC Basics: Go] (http://www.grpc.io/docs/tutorials/basic/go.html) for more information. See the definition of the route guide service in proto/route_guide.proto. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go index dfc19da3a9..a96c0302ce 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go @@ -40,14 +40,14 @@ package main import ( "flag" "io" - "log" "math/rand" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" - pb "google.golang.org/grpc/examples/route_guide/proto" + pb "google.golang.org/grpc/examples/route_guide/routeguide" + "google.golang.org/grpc/grpclog" ) var ( @@ -59,20 +59,20 @@ var ( // printFeature gets the feature for the given point. 
func printFeature(client pb.RouteGuideClient, point *pb.Point) { - log.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) + grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) feature, err := client.GetFeature(context.Background(), point) if err != nil { - log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } - log.Println(feature) + grpclog.Println(feature) } // printFeatures lists all the features within the given bounding Rectangle. func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { - log.Printf("Looking for features within %v", rect) + grpclog.Printf("Looking for features within %v", rect) stream, err := client.ListFeatures(context.Background(), rect) if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } for { feature, err := stream.Recv() @@ -80,9 +80,9 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { break } if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } - log.Println(feature) + grpclog.Println(feature) } } @@ -95,21 +95,21 @@ func runRecordRoute(client pb.RouteGuideClient) { for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } - log.Printf("Traversing %d points.", len(points)) + grpclog.Printf("Traversing %d points.", len(points)) stream, err := client.RecordRoute(context.Background()) if err != nil { - log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, point, err) + grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { - log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } - log.Printf("Route summary: %v", reply) + grpclog.Printf("Route summary: %v", reply) } // runRouteChat receives a sequence of route notes, while sending notes for various locations. 
@@ -124,7 +124,7 @@ func runRouteChat(client pb.RouteGuideClient) { } stream, err := client.RouteChat(context.Background()) if err != nil { - log.Fatalf("%v.RouteChat(_) = _, %v", client, err) + grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { @@ -136,14 +136,14 @@ func runRouteChat(client pb.RouteGuideClient) { return } if err != nil { - log.Fatalf("Failed to receive a note : %v", err) + grpclog.Fatalf("Failed to receive a note : %v", err) } - log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { - log.Fatalf("Failed to send a note: %v", err) + grpclog.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() @@ -169,16 +169,18 @@ func main() { var err error creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { - log.Fatalf("Failed to create TLS credentials %v", err) + grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) } conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { - log.Fatalf("fail to dial: %v", err) + grpclog.Fatalf("fail to dial: %v", err) } defer conn.Close() client := pb.NewRouteGuideClient(conn) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go similarity index 98% rename from Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go rename to Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go index 5851e023f9..9ac9029aae 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -15,7 +15,7 @@ It has these top-level messages: RouteNote RouteSummary */ -package proto +package routeguide import proto1 "github.com/golang/protobuf/proto" @@ -310,9 +310,9 @@ func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { s.RegisterService(&_RouteGuide_serviceDesc, srv) } -func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Point) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(RouteGuideServer).GetFeature(ctx, in) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto similarity index 99% rename from Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto rename to Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto index 5ea4fcf5b6..bee7ac51ab 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/proto/route_guide.proto +++ 
b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -29,7 +29,7 @@ syntax = "proto3"; -package proto; +package routeguide; // Interface exported by the server. service RouteGuide { diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go index ff421a6fb0..09b3942d19 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/server/server.go @@ -43,7 +43,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "math" "net" "time" @@ -52,10 +51,11 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" - proto "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/examples/route_guide/proto" + pb "google.golang.org/grpc/examples/route_guide/routeguide" ) var ( @@ -159,10 +159,10 @@ func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error func (s *routeGuideServer) loadFeatures(filePath string) { file, err := ioutil.ReadFile(filePath) if err != nil { - log.Fatalf("Failed to load default features: %v", err) + grpclog.Fatalf("Failed to load default features: %v", err) } if err := json.Unmarshal(file, &s.savedFeatures); err != nil { - log.Fatalf("Failed to load default features: %v", err) + grpclog.Fatalf("Failed to load default features: %v", err) } } @@ -223,17 +223,17 @@ func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { - log.Fatalf("failed to listen: %v", err) + grpclog.Fatalf("failed to listen: %v", err) } - grpcServer := grpc.NewServer() - pb.RegisterRouteGuideServer(grpcServer, newServer()) + var opts []grpc.ServerOption if *tls { creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - log.Fatalf("Failed to generate credentials %v", err) + grpclog.Fatalf("Failed to generate credentials %v", err) } - grpcServer.Serve(creds.NewListener(lis)) - } else { - grpcServer.Serve(lis) + opts = []grpc.ServerOption{grpc.Creds(creds)} } + grpcServer := grpc.NewServer(opts...) 
+ pb.RegisterRouteGuideServer(grpcServer, newServer()) + grpcServer.Serve(lis) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem deleted file mode 100644 index 999da89775..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw -NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh -MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 -Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 -cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv -xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ -QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ -AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz -fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y -5ukK//dCPAzA11YuX2rnex0JhuTQfcI= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json deleted file mode 100644 index 9d6a980ab7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json +++ /dev/null @@ -1,601 +0,0 @@ -[{ - "location": { - "latitude": 407838351, - "longitude": -746143763 - }, - "name": "Patriots Path, Mendham, NJ 07945, USA" -}, { - "location": { - "latitude": 408122808, - "longitude": -743999179 - }, - "name": "101 New Jersey 10, Whippany, NJ 07981, USA" -}, { - "location": { - "latitude": 413628156, - "longitude": -749015468 - }, - "name": "U.S. 
6, Shohola, PA 18458, USA" -}, { - "location": { - "latitude": 419999544, - "longitude": -740371136 - }, - "name": "5 Conners Road, Kingston, NY 12401, USA" -}, { - "location": { - "latitude": 414008389, - "longitude": -743951297 - }, - "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" -}, { - "location": { - "latitude": 419611318, - "longitude": -746524769 - }, - "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" -}, { - "location": { - "latitude": 406109563, - "longitude": -742186778 - }, - "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" -}, { - "location": { - "latitude": 416802456, - "longitude": -742370183 - }, - "name": "352 South Mountain Road, Wallkill, NY 12589, USA" -}, { - "location": { - "latitude": 412950425, - "longitude": -741077389 - }, - "name": "Bailey Turn Road, Harriman, NY 10926, USA" -}, { - "location": { - "latitude": 412144655, - "longitude": -743949739 - }, - "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" -}, { - "location": { - "latitude": 415736605, - "longitude": -742847522 - }, - "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" -}, { - "location": { - "latitude": 413843930, - "longitude": -740501726 - }, - "name": "162 Merrill Road, Highland Mills, NY 10930, USA" -}, { - "location": { - "latitude": 410873075, - "longitude": -744459023 - }, - "name": "Clinton Road, West Milford, NJ 07480, USA" -}, { - "location": { - "latitude": 412346009, - "longitude": -744026814 - }, - "name": "16 Old Brook Lane, Warwick, NY 10990, USA" -}, { - "location": { - "latitude": 402948455, - "longitude": -747903913 - }, - "name": "3 Drake Lane, Pennington, NJ 08534, USA" -}, { - "location": { - "latitude": 406337092, - "longitude": -740122226 - }, - "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" -}, { - "location": { - "latitude": 406421967, - "longitude": -747727624 - }, - "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" -}, { - "location": { - "latitude": 416318082, - "longitude": -749677716 - }, - "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" -}, { - "location": { - "latitude": 415301720, - "longitude": -748416257 - }, - "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" -}, { - "location": { - "latitude": 402647019, - "longitude": -747071791 - }, - "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" -}, { - "location": { - "latitude": 412567807, - "longitude": -741058078 - }, - "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" -}, { - "location": { - "latitude": 416855156, - "longitude": -744420597 - }, - "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" -}, { - "location": { - "latitude": 404663628, - "longitude": -744820157 - }, - "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" -}, { - "location": { - "latitude": 407113723, - "longitude": -749746483 - }, - "name": "" -}, { - "location": { - "latitude": 402133926, - "longitude": -743613249 - }, - "name": "" -}, { - "location": { - "latitude": 400273442, - "longitude": -741220915 - }, - "name": "" -}, { - "location": { - "latitude": 411236786, - "longitude": -744070769 - }, - "name": "" -}, { - "location": { - "latitude": 411633782, - "longitude": -746784970 - }, - "name": "211-225 Plains Road, Augusta, NJ 07822, USA" -}, { - "location": { - "latitude": 415830701, - "longitude": -742952812 - }, - "name": "" -}, { - "location": { - "latitude": 413447164, - "longitude": -748712898 - }, - "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" -}, { - "location": { - 
"latitude": 405047245, - "longitude": -749800722 - }, - "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" -}, { - "location": { - "latitude": 418858923, - "longitude": -746156790 - }, - "name": "" -}, { - "location": { - "latitude": 417951888, - "longitude": -748484944 - }, - "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" -}, { - "location": { - "latitude": 407033786, - "longitude": -743977337 - }, - "name": "26 East 3rd Street, New Providence, NJ 07974, USA" -}, { - "location": { - "latitude": 417548014, - "longitude": -740075041 - }, - "name": "" -}, { - "location": { - "latitude": 410395868, - "longitude": -744972325 - }, - "name": "" -}, { - "location": { - "latitude": 404615353, - "longitude": -745129803 - }, - "name": "" -}, { - "location": { - "latitude": 406589790, - "longitude": -743560121 - }, - "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" -}, { - "location": { - "latitude": 414653148, - "longitude": -740477477 - }, - "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" -}, { - "location": { - "latitude": 405957808, - "longitude": -743255336 - }, - "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" -}, { - "location": { - "latitude": 411733589, - "longitude": -741648093 - }, - "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" -}, { - "location": { - "latitude": 412676291, - "longitude": -742606606 - }, - "name": "1270 Lakes Road, Monroe, NY 10950, USA" -}, { - "location": { - "latitude": 409224445, - "longitude": -748286738 - }, - "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" -}, { - "location": { - "latitude": 406523420, - "longitude": -742135517 - }, - "name": "652 Garden Street, Elizabeth, NJ 07202, USA" -}, { - "location": { - "latitude": 401827388, - "longitude": -740294537 - }, - "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" -}, { - "location": { - "latitude": 410564152, - "longitude": -743685054 - }, - "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" -}, { - "location": { - "latitude": 408472324, - "longitude": -740726046 - }, - "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" -}, { - "location": { - "latitude": 412452168, - "longitude": -740214052 - }, - "name": "5 White Oak Lane, Stony Point, NY 10980, USA" -}, { - "location": { - "latitude": 409146138, - "longitude": -746188906 - }, - "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" -}, { - "location": { - "latitude": 404701380, - "longitude": -744781745 - }, - "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" -}, { - "location": { - "latitude": 409642566, - "longitude": -746017679 - }, - "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" -}, { - "location": { - "latitude": 408031728, - "longitude": -748645385 - }, - "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" -}, { - "location": { - "latitude": 413700272, - "longitude": -742135189 - }, - "name": "367 Prospect Road, Chester, NY 10918, USA" -}, { - "location": { - "latitude": 404310607, - "longitude": -740282632 - }, - "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" -}, { - "location": { - "latitude": 409319800, - "longitude": -746201391 - }, - "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" -}, { - "location": { - "latitude": 406685311, - "longitude": -742108603 - }, - "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" -}, { - "location": { - "latitude": 419018117, - "longitude": -749142781 - }, - "name": "43 Dreher Road, Roscoe, NY 12776, USA" -}, { - "location": { - 
"latitude": 412856162, - "longitude": -745148837 - }, - "name": "Swan Street, Pine Island, NY 10969, USA" -}, { - "location": { - "latitude": 416560744, - "longitude": -746721964 - }, - "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" -}, { - "location": { - "latitude": 405314270, - "longitude": -749836354 - }, - "name": "" -}, { - "location": { - "latitude": 414219548, - "longitude": -743327440 - }, - "name": "" -}, { - "location": { - "latitude": 415534177, - "longitude": -742900616 - }, - "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" -}, { - "location": { - "latitude": 406898530, - "longitude": -749127080 - }, - "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" -}, { - "location": { - "latitude": 407586880, - "longitude": -741670168 - }, - "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" -}, { - "location": { - "latitude": 400106455, - "longitude": -742870190 - }, - "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" -}, { - "location": { - "latitude": 400066188, - "longitude": -746793294 - }, - "name": "" -}, { - "location": { - "latitude": 418803880, - "longitude": -744102673 - }, - "name": "40 Mountain Road, Napanoch, NY 12458, USA" -}, { - "location": { - "latitude": 414204288, - "longitude": -747895140 - }, - "name": "" -}, { - "location": { - "latitude": 414777405, - "longitude": -740615601 - }, - "name": "" -}, { - "location": { - "latitude": 415464475, - "longitude": -747175374 - }, - "name": "48 North Road, Forestburgh, NY 12777, USA" -}, { - "location": { - "latitude": 404062378, - "longitude": -746376177 - }, - "name": "" -}, { - "location": { - "latitude": 405688272, - "longitude": -749285130 - }, - "name": "" -}, { - "location": { - "latitude": 400342070, - "longitude": -748788996 - }, - "name": "" -}, { - "location": { - "latitude": 401809022, - "longitude": -744157964 - }, - "name": "" -}, { - "location": { - "latitude": 404226644, - "longitude": -740517141 - }, - "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" -}, { - "location": { - "latitude": 410322033, - "longitude": -747871659 - }, - "name": "" -}, { - "location": { - "latitude": 407100674, - "longitude": -747742727 - }, - "name": "" -}, { - "location": { - "latitude": 418811433, - "longitude": -741718005 - }, - "name": "213 Bush Road, Stone Ridge, NY 12484, USA" -}, { - "location": { - "latitude": 415034302, - "longitude": -743850945 - }, - "name": "" -}, { - "location": { - "latitude": 411349992, - "longitude": -743694161 - }, - "name": "" -}, { - "location": { - "latitude": 404839914, - "longitude": -744759616 - }, - "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" -}, { - "location": { - "latitude": 414638017, - "longitude": -745957854 - }, - "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" -}, { - "location": { - "latitude": 412127800, - "longitude": -740173578 - }, - "name": "" -}, { - "location": { - "latitude": 401263460, - "longitude": -747964303 - }, - "name": "" -}, { - "location": { - "latitude": 412843391, - "longitude": -749086026 - }, - "name": "" -}, { - "location": { - "latitude": 418512773, - "longitude": -743067823 - }, - "name": "" -}, { - "location": { - "latitude": 404318328, - "longitude": -740835638 - }, - "name": "42-102 Main Street, Belford, NJ 07718, USA" -}, { - "location": { - "latitude": 419020746, - "longitude": -741172328 - }, - "name": "" -}, { - "location": { - "latitude": 404080723, - "longitude": -746119569 - }, - "name": "" -}, { - "location": { - "latitude": 401012643, - "longitude": 
-744035134 - }, - "name": "" -}, { - "location": { - "latitude": 404306372, - "longitude": -741079661 - }, - "name": "" -}, { - "location": { - "latitude": 403966326, - "longitude": -748519297 - }, - "name": "" -}, { - "location": { - "latitude": 405002031, - "longitude": -748407866 - }, - "name": "" -}, { - "location": { - "latitude": 409532885, - "longitude": -742200683 - }, - "name": "" -}, { - "location": { - "latitude": 416851321, - "longitude": -742674555 - }, - "name": "" -}, { - "location": { - "latitude": 406411633, - "longitude": -741722051 - }, - "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" -}, { - "location": { - "latitude": 413069058, - "longitude": -744597778 - }, - "name": "261 Van Sickle Road, Goshen, NY 10924, USA" -}, { - "location": { - "latitude": 418465462, - "longitude": -746859398 - }, - "name": "" -}, { - "location": { - "latitude": 411733222, - "longitude": -744228360 - }, - "name": "" -}, { - "location": { - "latitude": 410248224, - "longitude": -747127767 - }, - "name": "3 Hasta Way, Newton, NJ 07860, USA" -}] diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key deleted file mode 100644 index ed00cd500e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ -bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM -BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB -AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I -ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg -omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb -ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB -9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd -MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v -IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc -USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo -VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a -RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem deleted file mode 100644 index 8e582e571f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 -MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl -c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs -JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO -RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 -3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL -BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 -b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ -KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS 
-wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e -aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go new file mode 100644 index 0000000000..53e3c539fb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package glogger defines glog-based logging for grpc. +*/ +package glogger + +import ( + "github.com/golang/glog" + "google.golang.org/grpc/grpclog" +) + +func init() { + grpclog.SetLogger(&glogger{}) +} + +type glogger struct{} + +func (g *glogger) Fatal(args ...interface{}) { + glog.Fatal(args...) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.Fatalf(format, args...) +} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.Fatalln(args...) +} + +func (g *glogger) Print(args ...interface{}) { + glog.Info(args...) +} + +func (g *glogger) Printf(format string, args ...interface{}) { + glog.Infof(format, args...) +} + +func (g *glogger) Println(args ...interface{}) { + glog.Infoln(args...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 0000000000..cc6e27c064 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package grpclog defines logging for grpc. +*/ +package grpclog + +import ( + "log" + "os" +) + +// Use golang's standard logger by default. +var logger Logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger mimics golang's standard Logger as an interface. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. +func SetLogger(l Logger) { + logger = l +} + +// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. +func Fatal(args ...interface{}) { + logger.Fatal(args...) +} + +// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) +} + +// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +func Print(args ...interface{}) { + logger.Print(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +func Printf(format string, args ...interface{}) { + logger.Printf(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +func Println(args ...interface{}) { + logger.Println(args...) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go new file mode 100644 index 0000000000..96eba6f85e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: health.proto +// DO NOT EDIT! + +/* +Package grpc_health_v1alpha is a generated protocol buffer package. 
+ +It is generated from these files: + health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1alpha + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1alpha.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} + +func init() { + proto.RegisterEnum("grpc.health.v1alpha.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1alpha.Health/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + out, err := srv.(HealthServer).Check(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1alpha.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto new file mode 100644 index 0000000000..91c7f06e9b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package grpc.health.v1alpha; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/health/health.go b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go new file mode 100644 index 0000000000..7930bde7f3 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/health/health.go @@ -0,0 +1,49 @@ +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1alpha" +) + +type HealthServer struct { + mu sync.Mutex + // statusMap stores the serving status of the services this HealthServer monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +func NewHealthServer() *HealthServer { + return &HealthServer{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, grpc.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. 
+func (s *HealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go index a537c69678..4f715d35ee 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/client.go @@ -37,18 +37,22 @@ import ( "flag" "io" "io/ioutil" - "log" "net" "strconv" "strings" + "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/metadata" ) var ( @@ -67,10 +71,15 @@ var ( client_streaming : request streaming with single response; server_streaming : single request with response streaming; ping_pong : full-duplex streaming; + empty_stream : full-duplex streaming with zero message; + timeout_on_sleeping_server: fullduplex streaming; compute_engine_creds: large_unary with compute engine auth; - service_account_creds: large_unary with service account auth; - cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; - cancel_after_first_response: cancellation after receiving 1st message from the server.`) + service_account_creds: large_unary with service account auth; + jwt_token_creds: large_unary with jwt token auth; + per_rpc_creds: large_unary with per rpc token; + oauth2_auth_token: large_unary with oauth2 token auth; + cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; + cancel_after_first_response: cancellation after receiving 1st message from the server.`) ) var ( @@ -82,15 +91,15 @@ var ( func newPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { - log.Fatalf("Requested a response with invalid length %d", size) + grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: - log.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: - log.Fatalf("Unsupported payload type: %d", t) + grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ Type: t.Enum(), @@ -101,12 +110,12 @@ func newPayload(t testpb.PayloadType, size int) *testpb.Payload { func doEmptyUnaryCall(tc testpb.TestServiceClient) { reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}) if err != nil { - log.Fatal("/TestService/EmptyCall RPC failed: ", err) + grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err) } if !proto.Equal(&testpb.Empty{}, reply) { - log.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) + grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) } - log.Println("EmptyUnaryCall done") + grpclog.Println("EmptyUnaryCall done") } func doLargeUnaryCall(tc testpb.TestServiceClient) { @@ -118,20 +127,20 @@ func doLargeUnaryCall(tc testpb.TestServiceClient) { } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { - 
log.Fatal("/TestService/UnaryCall RPC failed: ", err) + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } t := reply.GetPayload().GetType() s := len(reply.GetPayload().GetBody()) if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { - log.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) + grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) } - log.Println("LargeUnaryCall done") + grpclog.Println("LargeUnaryCall done") } func doClientStreaming(tc testpb.TestServiceClient) { stream, err := tc.StreamingInputCall(context.Background()) if err != nil { - log.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } var sum int for _, s := range reqSizes { @@ -140,20 +149,20 @@ func doClientStreaming(tc testpb.TestServiceClient) { Payload: pl, } if err := stream.Send(req); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } sum += s - log.Printf("Sent a request of size %d, aggregated size %d", s, sum) + grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum) } reply, err := stream.CloseAndRecv() if err != nil { - log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } if reply.GetAggregatedPayloadSize() != int32(sum) { - log.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) + grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } - log.Println("ClientStreaming done") + grpclog.Println("ClientStreaming done") } func doServerStreaming(tc testpb.TestServiceClient) { @@ -169,7 +178,7 @@ func doServerStreaming(tc testpb.TestServiceClient) { } stream, err := tc.StreamingOutputCall(context.Background(), req) if err != nil { - log.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) + grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) } var rpcStatus error var respCnt int @@ -182,28 +191,28 @@ func doServerStreaming(tc testpb.TestServiceClient) { } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { - log.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) + grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { - log.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ respCnt++ } if rpcStatus != io.EOF { - log.Fatalf("Failed to finish the server streaming rpc: %v", err) + grpclog.Fatalf("Failed to finish the server streaming rpc: %v", err) } if respCnt != len(respSizes) { - log.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) + grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) } - log.Println("ServerStreaming done") + grpclog.Println("ServerStreaming done") } func doPingPong(tc testpb.TestServiceClient) { stream, err := tc.FullDuplexCall(context.Background()) if err != nil { - log.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } var index int for index < len(reqSizes) { @@ -219,29 +228,67 @@ func doPingPong(tc 
testpb.TestServiceClient) { Payload: pl, } if err := stream.Send(req); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } reply, err := stream.Recv() if err != nil { - log.Fatalf("%v.Recv() = %v", stream, err) + grpclog.Fatalf("%v.Recv() = %v", stream, err) } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { - log.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) + grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { - log.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ } if err := stream.CloseSend(); err != nil { - log.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { - log.Fatalf("%v failed to complele the ping pong test: %v", stream, err) + grpclog.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } - log.Println("Pingpong done") + grpclog.Println("Pingpong done") +} + +func doEmptyStream(tc testpb.TestServiceClient) { + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err) + } + grpclog.Println("Emptystream done") +} + +func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) { + ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + if grpc.Code(err) == codes.DeadlineExceeded { + grpclog.Println("TimeoutOnSleepingServer done") + return + } + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) + } + if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded { + grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) + } + grpclog.Println("TimeoutOnSleepingServer done") } func doComputeEngineCreds(tc testpb.TestServiceClient) { @@ -255,23 +302,23 @@ func doComputeEngineCreds(tc testpb.TestServiceClient) { } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { - log.Fatal("/TestService/UnaryCall RPC failed: ", err) + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } user := reply.GetUsername() scope := reply.GetOauthScope() if user != *defaultServiceAccount { - log.Fatalf("Got user name %q, want %q.", user, *defaultServiceAccount) + grpclog.Fatalf("Got user name %q, want %q.", user, *defaultServiceAccount) } if !strings.Contains(*oauthScope, scope) { - log.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) } - log.Println("ComputeEngineCreds done") + grpclog.Println("ComputeEngineCreds done") } -func getServiceAccountJsonKey() 
[]byte { +func getServiceAccountJSONKey() []byte { jsonKey, err := ioutil.ReadFile(*serviceAccountKeyFile) if err != nil { - log.Fatalf("Failed to read the service account key file: %v", err) + grpclog.Fatalf("Failed to read the service account key file: %v", err) } return jsonKey } @@ -287,24 +334,110 @@ func doServiceAccountCreds(tc testpb.TestServiceClient) { } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { - log.Fatal("/TestService/UnaryCall RPC failed: ", err) + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } - jsonKey := getServiceAccountJsonKey() + jsonKey := getServiceAccountJSONKey() user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { - log.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(*oauthScope, scope) { - log.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) } - log.Println("ServiceAccountCreds done") + grpclog.Println("ServiceAccountCreds done") +} + +func doJWTTokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + grpclog.Println("JWTtokenCreds done") +} + +func getToken() *oauth2.Token { + jsonKey := getServiceAccountJSONKey() + config, err := google.JWTConfigFromJSON(jsonKey, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to get the config: %v", err) + } + token, err := config.TokenSource(context.Background()).Token() + if err != nil { + grpclog.Fatalf("Failed to get the token: %v", err) + } + return token +} + +func doOauth2TokenCreds(tc testpb.TestServiceClient) { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey() + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("Oauth2TokenCreds done") +} + +func doPerRPCCreds(tc testpb.TestServiceClient) { + jsonKey := getServiceAccountJSONKey() + pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), 
+ FillOauthScope: proto.Bool(true), + } + token := getToken() + kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} + ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + reply, err := tc.UnaryCall(ctx, req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(*oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) + } + grpclog.Println("PerRPCCreds done") } var ( testMetadata = metadata.MD{ - "key1": "value1", - "key2": "value2", + "key1": []string{"value1"}, + "key2": []string{"value2"}, } ) @@ -312,21 +445,21 @@ func doCancelAfterBegin(tc testpb.TestServiceClient) { ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata)) stream, err := tc.StreamingInputCall(ctx) if err != nil { - log.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } cancel() _, err = stream.CloseAndRecv() if grpc.Code(err) != codes.Canceled { - log.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } - log.Println("CancelAfterBegin done") + grpclog.Println("CancelAfterBegin done") } func doCancelAfterFirstResponse(tc testpb.TestServiceClient) { ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx) if err != nil { - log.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } respParam := []*testpb.ResponseParameters{ { @@ -340,16 +473,16 @@ func doCancelAfterFirstResponse(tc testpb.TestServiceClient) { Payload: pl, } if err := stream.Send(req); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, req, err) + grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } if _, err := stream.Recv(); err != nil { - log.Fatalf("%v.Recv() = %v", stream, err) + grpclog.Fatalf("%v.Recv() = %v", stream, err) } cancel() if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled { - log.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } - log.Println("CancelAfterFirstResponse done") + grpclog.Println("CancelAfterFirstResponse done") } func main() { @@ -366,25 +499,35 @@ func main() { var err error creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { - log.Fatalf("Failed to create TLS credentials %v", err) + grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) if *testCase == "compute_engine_creds" { - opts = append(opts, grpc.WithPerRPCCredentials(credentials.NewComputeEngine())) + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) } else if *testCase == "service_account_creds" { - jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) + jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { - log.Fatalf("Failed to 
create JWT credentials: %v", err) + grpclog.Fatalf("Failed to create JWT credentials: %v", err) } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "jwt_token_creds" { + jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "oauth2_auth_token" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(getToken()))) } + } else { + opts = append(opts, grpc.WithInsecure()) } conn, err := grpc.Dial(serverAddr, opts...) if err != nil { - log.Fatalf("Fail to dial: %v", err) + grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() tc := testpb.NewTestServiceClient(conn) @@ -399,21 +542,40 @@ func main() { doServerStreaming(tc) case "ping_pong": doPingPong(tc) + case "empty_stream": + doEmptyStream(tc) + case "timeout_on_sleeping_server": + doTimeoutOnSleepingServer(tc) case "compute_engine_creds": if !*useTLS { - log.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") + grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") } doComputeEngineCreds(tc) case "service_account_creds": if !*useTLS { - log.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") + grpclog.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") } doServiceAccountCreds(tc) + case "jwt_token_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") + } + doJWTTokenCreds(tc) + case "per_rpc_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") + } + doPerRPCCreds(tc) + case "oauth2_auth_token": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. 
TLS is required to execute oauth2_auth_token test case.") + } + doOauth2TokenCreds(tc) case "cancel_after_begin": doCancelAfterBegin(tc) case "cancel_after_first_response": doCancelAfterFirstResponse(tc) default: - log.Fatal("Unsupported test case: ", *testCase) + grpclog.Fatal("Unsupported test case: ", *testCase) } } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem deleted file mode 100644 index 999da89775..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw -NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh -MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 -Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 -cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv -xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ -QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ -AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz -fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y -5ukK//dCPAzA11YuX2rnex0JhuTQfcI= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key deleted file mode 100644 index ed00cd500e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ -bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM -BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB -AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I -ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg -omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb -ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB -9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd -MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v -IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc -USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo -VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a -RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem deleted file mode 100644 index 8e582e571f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 -MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl -c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs 
-JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO -RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 -3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL -BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 -b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ -KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS -wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e -aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go index 261ba1cc27..bd492fef47 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: Empty @@ -539,9 +539,9 @@ func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Empty) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(TestServiceServer).EmptyCall(ctx, in) @@ -551,9 +551,9 @@ func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec return out, nil } -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(SimpleRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go index 35eefdb54f..f781c9d537 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/server.go @@ -37,7 +37,6 @@ import ( "flag" "fmt" "io" - "log" "net" "strconv" "time" @@ -46,6 +45,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -156,7 +156,7 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ } func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { - msgBuf := make([]*testpb.StreamingOutputCallRequest, 0) + var msgBuf []*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() if err == io.EOF { @@ -193,17 +193,17 @@ func main() { p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) if err != nil { - log.Fatalf("failed to listen: %v", 
err) + grpclog.Fatalf("failed to listen: %v", err) } - server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, &testServer{}) + var opts []grpc.ServerOption if *useTLS { creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - log.Fatalf("Failed to generate credentials %v", err) + grpclog.Fatalf("Failed to generate credentials %v", err) } - server.Serve(creds.NewListener(lis)) - } else { - server.Serve(lis) + opts = []grpc.ServerOption{grpc.Creds(creds)} } + server := grpc.NewServer(opts...) + testpb.RegisterTestServiceServer(server, &testServer{}) + server.Serve(lis) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem deleted file mode 100644 index 999da89775..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw -NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh -MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 -Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 -cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv -xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ -QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ -AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz -fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y -5ukK//dCPAzA11YuX2rnex0JhuTQfcI= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key deleted file mode 100644 index ed00cd500e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ -bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM -BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB -AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I -ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg -omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb -ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB -9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd -MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v -IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc -USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo -VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a -RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem deleted file mode 100644 index 8e582e571f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ 
-dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 -MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl -c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs -JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO -RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 -3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL -BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 -b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ -KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS -wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e -aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go index aa12b1d573..5f26abaeac 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go @@ -64,7 +64,7 @@ func encodeKeyValue(k, v string) (string, string) { if isASCII(v) { return k, v } - key := k + binHdrSuffix + key := strings.ToLower(k + binHdrSuffix) val := base64.StdEncoding.EncodeToString([]byte(v)) return key, string(val) } @@ -85,14 +85,14 @@ func DecodeKeyValue(k, v string) (string, string, error) { // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. -type MD map[string]string +type MD map[string][]string // New creates a MD from given key-value map. func New(m map[string]string) MD { md := MD{} for k, v := range m { key, val := encodeKeyValue(k, v) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -111,7 +111,7 @@ func Pairs(kv ...string) MD { continue } key, val := encodeKeyValue(k, s) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -125,7 +125,9 @@ func (md MD) Len() int { func (md MD) Copy() MD { out := MD{} for k, v := range md { - out[k] = v + for _, i := range v { + out[k] = append(out[k], i) + } } return out } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go new file mode 100644 index 0000000000..362649da43 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
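Note on the metadata change above: MD is now map[string][]string, so each key carries a slice of values and New/Pairs append rather than overwrite. A minimal sketch of building metadata and attaching it to an outgoing context under the new type (the key names and token value are placeholders, not part of this diff):

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
        "google.golang.org/grpc/metadata"
    )

    func main() {
        // Literal construction now takes a string slice per key.
        md := metadata.MD{"key1": []string{"value1", "value1b"}}
        // Pairs and New still take flat key-value input and append values.
        auth := metadata.Pairs("authorization", "Bearer <placeholder-token>")
        ctx := metadata.NewContext(context.Background(), auth)
        _ = ctx
        fmt.Println(md.Len(), auth["authorization"])
    }

This is the same pattern the interop client's doPerRPCCreds above uses to send an OAuth token per RPC.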
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package etcd + +import ( + etcdcl "github.com/coreos/etcd/client" + "golang.org/x/net/context" + "google.golang.org/grpc/naming" +) +// update defines an etcd key-value update. +type update struct { + key, val string +} + +// getNode reports the set of changes starting from node recursively. +func getNode(node *etcdcl.Node) (updates []*update) { + for _, v := range node.Nodes { + updates = append(updates, getNode(v)...) + } + if !node.Dir { + u := &update{ + key: node.Key, + val: node.Value, + } + updates = []*update{u} + } + return +} + +type watcher struct { + wr etcdcl.Watcher + ctx context.Context + cancel context.CancelFunc + kv map[string]string +} + +func (w *watcher) Next() (nu []*naming.Update, _ error) { + for { + resp, err := w.wr.Next(w.ctx) + if err != nil { + return nil, err + } + updates := getNode(resp.Node) + for _, u := range updates { + switch resp.Action { + case "set": + if resp.PrevNode == nil { + w.kv[u.key] = u.val + nu = append(nu, &naming.Update{ + Op: naming.Add, + Addr: u.val, + }) + } else { + nu = append(nu, &naming.Update{ + Op: naming.Delete, + Addr: w.kv[u.key], + }) + nu = append(nu, &naming.Update{ + Op: naming.Add, + Addr: u.val, + }) + w.kv[u.key] = u.val + } + case "delete": + nu = append(nu, &naming.Update{ + Op: naming.Delete, + Addr: w.kv[u.key], + }) + delete(w.kv, u.key) + } + } + if len(nu) > 0 { + break + } + } + return nu, nil +} + +func (w *watcher) Stop() { + w.cancel() +} + +type resolver struct { + kapi etcdcl.KeysAPI + kv map[string]string +} + +func (r *resolver) NewWatcher(target string) naming.Watcher { + ctx, cancel := context.WithCancel(context.Background()) + w := &watcher{ + wr: r.kapi.Watcher(target, &etcdcl.WatcherOptions{Recursive: true}), + ctx: ctx, + cancel: cancel, + } + for k, v := range r.kv { + w.kv[k] = v + } + return w +} + +func (r *resolver) Resolve(target string) (nu []*naming.Update, _ error) { + resp, err := r.kapi.Get(context.Background(), target, &etcdcl.GetOptions{Recursive: true}) + if err != nil { + return nil, err + } + updates := getNode(resp.Node) + for _, u := range updates { + r.kv[u.key] = u.val + nu = append(nu, &naming.Update{ + Op: naming.Add, + Addr: u.val, + }) + } + return nu, nil +} + +// NewResolver creates an etcd-based naming.Resolver. 
+func NewResolver(cfg etcdcl.Config) (naming.Resolver, error) { + c, err := etcdcl.New(cfg) + if err != nil { + return nil, err + } + return &resolver{ + kapi: etcdcl.NewKeysAPI(c), + kv: make(map[string]string), + }, nil +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go similarity index 54% rename from Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go rename to Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go index b5b18bf439..610eb8112c 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util_test.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go @@ -31,57 +31,44 @@ * */ -package transport +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be suject to change. +package naming -import ( - "fmt" - "testing" - "time" +// OP defines the corresponding operations for a name resolution change. +type OP uint8 + +const ( + // Add indicates a new address is added. + Add = iota + // Delete indicates an exisiting address is deleted. + Delete ) -func TestTimeoutEncode(t *testing.T) { - for _, test := range []struct { - in string - out string - }{ - {"12345678ns", "12345678n"}, - {"123456789ns", "123457u"}, - {"12345678us", "12345678u"}, - {"123456789us", "123457m"}, - {"12345678ms", "12345678m"}, - {"123456789ms", "123457S"}, - {"12345678s", "12345678S"}, - {"123456789s", "2057614M"}, - {"12345678m", "12345678M"}, - {"123456789m", "2057614H"}, - } { - d, err := time.ParseDuration(test.in) - if err != nil { - t.Fatalf("failed to parse duration string %s: %v", test.in, err) - } - out := timeoutEncode(d) - if out != test.out { - t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out) - } - } +type ServiceConfig interface{} + +// Update defines a name resolution change. +type Update struct { + // Op indicates the operation of the update. + Op OP + Addr string + Config ServiceConfig } -func TestTimeoutDecode(t *testing.T) { - for _, test := range []struct { - // input - s string - // output - d time.Duration - err error - }{ - {"1234S", time.Second * 1234, nil}, - {"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")}, - {"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")}, - {"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")}, - } { - d, err := timeoutDecode(test.s) - if d != test.d || fmt.Sprint(err) != fmt.Sprint(test.err) { - t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err) - } - } +// Resolver does one-shot name resolution and creates a Watcher to +// watch the future updates. +type Resolver interface { + // Resolve returns the name resolution results. + Resolve(target string) ([]*Update, error) + // NewWatcher creates a Watcher to watch the changes on target. + NewWatcher(target string) Watcher +} + +// Watcher watches the updates for a particular target. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. + Next() ([]*Update, error) + // Stop stops the Watcher. 
+ Stop() } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/picker.go b/Godeps/_workspace/src/google.golang.org/grpc/picker.go new file mode 100644 index 0000000000..79f988688b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/picker.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "time" +) + +// Picker picks a Conn for RPC requests. +// This is EXPERIMENTAL and Please do not implement your own Picker for now. +type Picker interface { + // Init does initial processing for the Picker, e.g., initiate some connections. + Init(cc *ClientConn) error + // Pick returns the Conn to use for the upcoming RPC. It may return different + // Conn's up to the implementation. + Pick() (*Conn, error) + // State returns the connectivity state of the underlying connections. + State() ConnectivityState + // WaitForStateChange blocks until the state changes to something other than + // the sourceState or timeout fires on cc. It returns false if timeout fires, + // and true otherwise. + WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool + // Close closes all the Conn's owned by this Picker. + Close() error +} + +// unicastPicker is the default Picker which is used when there is no custom Picker +// specified by users. It always picks the same Conn. 
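The naming package added above only defines the interfaces; the etcd file is one concrete Resolver. As an illustration only (staticResolver, staticWatcher, and the addresses are hypothetical, not part of this change), a custom resolver just has to satisfy Resolve/NewWatcher and Next/Stop:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/naming"
    )

    // staticResolver resolves a target to a fixed address list.
    type staticResolver struct{ addrs []string }

    // staticWatcher never reports further changes; Next blocks until Stop.
    type staticWatcher struct{ done chan struct{} }

    var _ naming.Resolver = (*staticResolver)(nil)
    var _ naming.Watcher = (*staticWatcher)(nil)

    func (r *staticResolver) Resolve(target string) ([]*naming.Update, error) {
        var us []*naming.Update
        for _, a := range r.addrs {
            us = append(us, &naming.Update{Op: naming.Add, Addr: a})
        }
        return us, nil
    }

    func (r *staticResolver) NewWatcher(target string) naming.Watcher {
        return &staticWatcher{done: make(chan struct{})}
    }

    func (w *staticWatcher) Next() ([]*naming.Update, error) {
        <-w.done
        return nil, nil
    }

    func (w *staticWatcher) Stop() { close(w.done) }

    func main() {
        r := &staticResolver{addrs: []string{"10.0.0.1:10000", "10.0.0.2:10000"}}
        updates, _ := r.Resolve("my-service")
        for _, u := range updates {
            fmt.Println(u.Addr)
        }
    }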
+type unicastPicker struct { + conn *Conn +} + +func (p *unicastPicker) Init(cc *ClientConn) error { + c, err := NewConn(cc) + if err != nil { + return err + } + p.conn = c + return nil +} + +func (p *unicastPicker) Pick() (*Conn, error) { + return p.conn, nil +} + +func (p *unicastPicker) State() ConnectivityState { + return p.conn.State() +} + +func (p *unicastPicker) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { + return p.conn.WaitForStateChange(timeout, sourceState) +} + +func (p *unicastPicker) Close() error { + if p.conn != nil { + return p.conn.Close() + } + return nil +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go index 49512603bb..21a4a15a77 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go @@ -217,6 +217,18 @@ func Code(err error) codes.Code { return codes.Unknown } +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +func ErrorDesc(err error) string { + if err == nil { + return "" + } + if e, ok := err.(rpcError); ok { + return e.desc + } + return err.Error() +} + // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. func Errorf(c codes.Code, format string, a ...interface{}) error { @@ -277,28 +289,29 @@ func convertCode(err error) codes.Code { const ( // how long to wait after the first failure before retrying baseDelay = 1.0 * time.Second - // upper bound on backoff delay - maxDelay = 120 * time.Second - backoffFactor = 2.0 // backoff increases by this factor on each retry - backoffRange = 0.4 // backoff is randomized downwards by this factor + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 ) -// backoff returns a value in [0, maxDelay] that increases exponentially with -// retries, starting from baseDelay. -func backoff(retries int) time.Duration { +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } backoff, max := float64(baseDelay), float64(maxDelay) for backoff < max && retries > 0 { - backoff = backoff * backoffFactor + backoff *= backoffFactor retries-- } if backoff > max { backoff = max } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. We just subtract up - // to 40% so that we obey maxDelay. - backoff -= backoff * backoffRange * rand.Float64() + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go deleted file mode 100644 index 56cac3301e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "io" - "math" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - perfpb "google.golang.org/grpc/test/codec_perf" - "google.golang.org/grpc/transport" -) - -func TestSimpleParsing(t *testing.T) { - for _, test := range []struct { - // input - p []byte - // outputs - err error - b []byte - pt payloadFormat - }{ - {nil, io.EOF, nil, compressionNone}, - {[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone}, - {[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone}, - {[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone}, - {[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone}, - } { - buf := bytes.NewReader(test.p) - parser := &parser{buf} - pt, b, err := parser.recvMsg() - if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { - t.Fatalf("parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) - } - } -} - -func TestMultipleParsing(t *testing.T) { - // Set a byte stream consists of 3 messages with their headers. 
- p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} - b := bytes.NewReader(p) - parser := &parser{b} - - wantRecvs := []struct { - pt payloadFormat - data []byte - }{ - {compressionNone, []byte("a")}, - {compressionNone, []byte("bc")}, - {compressionNone, []byte("d")}, - } - for i, want := range wantRecvs { - pt, data, err := parser.recvMsg() - if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) { - t.Fatalf("after %d calls, parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, ", - i, p, pt, data, err, want.pt, want.data) - } - } - - pt, data, err := parser.recvMsg() - if err != io.EOF { - t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg() = %v, %v, %v\nwant _, _, %v", - len(wantRecvs), p, pt, data, err, io.EOF) - } -} - -func TestEncode(t *testing.T) { - for _, test := range []struct { - // input - msg proto.Message - pt payloadFormat - // outputs - b []byte - err error - }{ - {nil, compressionNone, []byte{0, 0, 0, 0, 0}, nil}, - } { - b, err := encode(protoCodec{}, test.msg, test.pt) - if err != test.err || !bytes.Equal(b, test.b) { - t.Fatalf("encode(_, _, %d) = %v, %v\nwant %v, %v", test.pt, b, err, test.b, test.err) - } - } -} - -func TestToRPCErr(t *testing.T) { - for _, test := range []struct { - // input - errIn error - // outputs - errOut error - }{ - {transport.StreamErrorf(codes.Unknown, ""), Errorf(codes.Unknown, "")}, - {transport.ErrConnClosing, Errorf(codes.Internal, transport.ErrConnClosing.Desc)}, - } { - err := toRPCErr(test.errIn) - if err != test.errOut { - t.Fatalf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) - } - } -} - -func TestContextErr(t *testing.T) { - for _, test := range []struct { - // input - errIn error - // outputs - errOut transport.StreamError - }{ - {context.DeadlineExceeded, transport.StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)}, - {context.Canceled, transport.StreamErrorf(codes.Canceled, "%v", context.Canceled)}, - } { - err := transport.ContextErr(test.errIn) - if err != test.errOut { - t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) - } - } -} - -func TestBackoff(t *testing.T) { - for _, test := range []struct { - retries int - maxResult time.Duration - }{ - {0, time.Second}, - {1, time.Duration(1e9 * math.Pow(backoffFactor, 1))}, - {2, time.Duration(1e9 * math.Pow(backoffFactor, 2))}, - {3, time.Duration(1e9 * math.Pow(backoffFactor, 3))}, - {4, time.Duration(1e9 * math.Pow(backoffFactor, 4))}, - {int(math.Log2(float64(maxDelay)/float64(baseDelay))) + 1, maxDelay}, - } { - delay := backoff(test.retries) - if delay < 0 || delay > test.maxResult { - t.Errorf("backoff(%d) = %v outside [0, %v]", test.retries, delay, test.maxResult) - } - } -} - -// bmEncode benchmarks encoding a Protocol Buffer message containing mSize -// bytes. 
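For reference, the reworked retry backoff above moves from factor 2.0 with downward-only randomization to factor 1.6 with symmetric +/-20% jitter. A rough stand-alone illustration of the delays it produces (this mirrors the unexported backoff helper for explanation only; it is not an API of the package):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // illustrativeBackoff mirrors the new backoff(): 1s base, x1.6 per retry,
    // capped at 120s, then jittered by +/-20%.
    func illustrativeBackoff(retries int) time.Duration {
        if retries == 0 {
            return time.Second
        }
        d, max := float64(time.Second), float64(120*time.Second)
        for d < max && retries > 0 {
            d *= 1.6
            retries--
        }
        if d > max {
            d = max
        }
        d *= 1 + 0.2*(rand.Float64()*2-1)
        return time.Duration(d)
    }

    func main() {
        // e.g. retry 3 is about 1s * 1.6^3 ~= 4.1s before jitter.
        for i := 0; i < 5; i++ {
            fmt.Println(i, illustrativeBackoff(i))
        }
    }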
-func bmEncode(b *testing.B, mSize int) { - msg := &perfpb.Buffer{Body: make([]byte, mSize)} - encoded, _ := encode(protoCodec{}, msg, compressionNone) - encodedSz := int64(len(encoded)) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - encode(protoCodec{}, msg, compressionNone) - } - b.SetBytes(encodedSz) -} - -func BenchmarkEncode1B(b *testing.B) { - bmEncode(b, 1) -} - -func BenchmarkEncode1KiB(b *testing.B) { - bmEncode(b, 1024) -} - -func BenchmarkEncode8KiB(b *testing.B) { - bmEncode(b, 8*1024) -} - -func BenchmarkEncode64KiB(b *testing.B) { - bmEncode(b, 64*1024) -} - -func BenchmarkEncode512KiB(b *testing.B) { - bmEncode(b, 512*1024) -} - -func BenchmarkEncode1MiB(b *testing.B) { - bmEncode(b, 1024*1024) -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/server.go b/Godeps/_workspace/src/google.golang.org/grpc/server.go index 79f17f3d7e..a5c8dc3539 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/server.go @@ -37,19 +37,23 @@ import ( "errors" "fmt" "io" - "log" "net" "reflect" + "runtime" "strings" "sync" + "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -77,14 +81,16 @@ type service struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts options - mu sync.Mutex - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - m map[string]*service // service name -> service info + opts options + mu sync.Mutex + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + m map[string]*service // service name -> service info + events trace.EventLog } type options struct { + creds credentials.Credentials codec Codec maxConcurrentStreams uint32 } @@ -92,13 +98,14 @@ type options struct { // A ServerOption sets options. type ServerOption func(*options) +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. func CustomCodec(codec Codec) ServerOption { return func(o *options) { o.codec = codec } } -// MaxConcurrentStreams returns an Option that will apply a limit on the number +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { return func(o *options) { @@ -106,6 +113,13 @@ func MaxConcurrentStreams(n uint32) ServerOption { } } +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.Credentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -117,28 +131,53 @@ func NewServer(opt ...ServerOption) *Server { // Set the default codec. 
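With this change the server is configured through functional options, and transport credentials move from wrapping the listener to the new Creds ServerOption (compare the interop server rewrite above). A minimal sketch of wiring a TLS server under the new API; the certificate paths, port, and service registration call are placeholders:

    package main

    import (
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        lis, err := net.Listen("tcp", ":10000")
        if err != nil {
            grpclog.Fatalf("failed to listen: %v", err)
        }
        creds, err := credentials.NewServerTLSFromFile("server.pem", "server.key")
        if err != nil {
            grpclog.Fatalf("failed to load TLS credentials: %v", err)
        }
        // Credentials and stream limits are plain ServerOptions now.
        s := grpc.NewServer(grpc.Creds(creds), grpc.MaxConcurrentStreams(100))
        // testpb.RegisterTestServiceServer(s, &testServer{}) would go here.
        s.Serve(lis)
    }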
opts.codec = protoCodec{} } - return &Server{ + s := &Server{ lis: make(map[net.Listener]bool), opts: opts, conns: make(map[transport.ServerTransport]bool), m: make(map[string]*service), } + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } } // RegisterService register a service and its implementation to the gRPC // server. Called from the IDL generated code. This must be called before // invoking Serve. func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - // Does some sanity checks. - if _, ok := s.m[sd.ServiceName]; ok { - log.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) if !st.Implements(ht) { - log.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } srv := &service{ server: ss, @@ -168,6 +207,7 @@ var ( // Service returns when lis.Accept fails. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() + s.printf("serving") if s.lis == nil { s.mu.Unlock() return ErrServerStopped @@ -183,20 +223,36 @@ func (s *Server) Serve(lis net.Listener) error { for { c, err := lis.Accept() if err != nil { + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() return err } - + var authInfo credentials.AuthInfo + if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { + var conn net.Conn + conn, authInfo, err = creds.ServerHandshake(c) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Println("grpc: Server.Serve failed to complete security handshake.") + continue + } + c = conn + } s.mu.Lock() if s.conns == nil { s.mu.Unlock() c.Close() return nil } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams) + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo, EnableTracing) if err != nil { + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - log.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) continue } s.conns[st] = true @@ -223,18 +279,35 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str // TODO(zhaoq): There exist other options also such as only closing the // faulty stream locally and remotely (Other streams can keep going). 
Find // the optimal option. - log.Fatalf("grpc: Server failed to encode response %v", err) + grpclog.Fatalf("grpc: Server failed to encode response %v", err) } return t.Write(stream, p, opts) } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) { +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) (err error) { + var traceInfo traceInfo + if EnableTracing { + traceInfo.tr = stream.Trace() + defer traceInfo.tr.Finish() + traceInfo.firstLine.client = false + traceInfo.firstLine.remoteAddr = t.RemoteAddr() + if dl, ok := stream.Context().Deadline(); ok { + traceInfo.firstLine.deadline = dl.Sub(time.Now()) + } + traceInfo.tr.LazyLog(&traceInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + traceInfo.tr.SetError() + } + }() + } p := &parser{s: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). - return + return err } if err != nil { switch err := err.(type) { @@ -242,18 +315,27 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Nothing to do here. case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - log.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } - return + return err } switch pf { case compressionNone: statusCode := codes.OK statusDesc := "" - reply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req) + df := func(v interface{}) error { + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return err + } + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df) if appErr != nil { if err, ok := appErr.(rpcError); ok { statusCode = err.code @@ -262,40 +344,73 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. statusCode = convertCode(appErr) statusDesc = appErr.Error() } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - log.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + if traceInfo.tr != nil && statusCode != codes.OK { + traceInfo.tr.LazyLog(stringer(statusDesc), true) + traceInfo.tr.SetError() } - return + + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(stringer("OK"), false) } opts := &transport.Options{ Last: true, Delay: false, } if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - if _, ok := err.(transport.ConnectionError); ok { - return - } - if e, ok := err.(transport.StreamError); ok { - statusCode = e.Code - statusDesc = e.Desc - } else { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: statusCode = codes.Unknown statusDesc = err.Error() } + return err } - t.WriteStatus(stream, statusCode, statusDesc) + if traceInfo.tr != nil { + traceInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) default: panic(fmt.Sprintf("payload format to be supported: %d", pf)) } } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) { +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) (err error) { ss := &serverStream{ - t: t, - s: stream, - p: &parser{s: stream}, - codec: s.opts.codec, + t: t, + s: stream, + p: &parser{s: stream}, + codec: s.opts.codec, + tracing: EnableTracing, + } + if ss.tracing { + ss.traceInfo.tr = stream.Trace() + ss.traceInfo.firstLine.client = false + ss.traceInfo.firstLine.remoteAddr = t.RemoteAddr() + if dl, ok := stream.Context().Deadline(); ok { + ss.traceInfo.firstLine.deadline = dl.Sub(time.Now()) + } + ss.traceInfo.tr.LazyLog(&ss.traceInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.traceInfo.tr.Finish() + ss.traceInfo.tr = nil + ss.mu.Unlock() + }() } if appErr := sd.Handler(srv.server, ss); appErr != nil { if err, ok := appErr.(rpcError); ok { @@ -306,7 +421,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.statusDesc = appErr.Error() } } - t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + if ss.tracing { + ss.mu.Lock() + if ss.statusCode != codes.OK { + ss.traceInfo.tr.LazyLog(stringer(ss.statusDesc), true) + ss.traceInfo.tr.SetError() + } else { + ss.traceInfo.tr.LazyLog(stringer("OK"), false) + } + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { @@ -317,7 +443,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str pos := strings.LastIndex(sm, "/") if pos == -1 { if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { - log.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } @@ -326,7 +452,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, ok := s.m[service] if !ok { if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { - log.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } @@ -340,7 +466,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str return } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { - log.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } } @@ -359,6 +485,12 @@ func (s *Server) Stop() { for c := range cs { c.Close() } + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() } // TestingCloseConns closes all exiting 
transports but keeps s.lis accepting new @@ -384,7 +516,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { } t := stream.ServerTransport() if t == nil { - log.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) } return t.WriteHeader(stream, md) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/stream.go b/Godeps/_workspace/src/google.golang.org/grpc/stream.go index 43fdcbecb5..28938a3c31 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/stream.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/stream.go @@ -36,8 +36,11 @@ package grpc import ( "errors" "io" + "sync" + "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" @@ -93,26 +96,52 @@ type ClientStream interface { // NewClientStream creates a new Stream for the client side. This is called // by generated code. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + var ( + conn *Conn + t transport.ClientTransport + err error + ) + for { + conn, err = cc.dopts.picker.Pick() + if err != nil { + return nil, toRPCErr(err) + } + t, err = conn.Wait(ctx) + if err != nil { + if err == ErrTransientFailure { + continue + } + return nil, toRPCErr(err) + } + break + } // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ - Host: cc.authority, + Host: conn.authority, Method: method, } - t, _, err := cc.wait(ctx, 0) - if err != nil { - return nil, toRPCErr(err) + cs := &clientStream{ + desc: desc, + codec: conn.dopts.codec, + tracing: EnableTracing, + } + if cs.tracing { + cs.traceInfo.tr = trace.New("grpc.Sent."+transport.MethodFamily(method), method) + cs.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false) + ctx = trace.NewContext(ctx, cs.traceInfo.tr) } s, err := t.NewStream(ctx, callHdr) if err != nil { return nil, toRPCErr(err) } - return &clientStream{ - t: t, - s: s, - p: &parser{s: s}, - desc: desc, - codec: cc.dopts.codec, - }, nil + cs.t = t + cs.s = s + cs.p = &parser{s: s} + return cs, nil } // clientStream implements a client side Stream. @@ -122,6 +151,13 @@ type clientStream struct { p *parser desc *StreamDesc codec Codec + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + traceInfo traceInfo } func (cs *clientStream) Context() context.Context { @@ -143,6 +179,13 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } defer func() { if err == nil || err == io.EOF { return @@ -161,7 +204,20 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { func (cs *clientStream) RecvMsg(m interface{}) (err error) { err = recv(cs.p, cs.codec, m) + defer func() { + // err != nil indicates the termination of the stream. 
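NewClientStream above now acquires a transport through the connection's Picker and records trace events in SendMsg/RecvMsg, but user code is unchanged: the generated streaming methods drive it. A small sketch against the interop service (the address is a placeholder), equivalent to the doEmptyStream case added earlier in this diff:

    package main

    import (
        "io"

        "golang.org/x/net/context"
        "google.golang.org/grpc"
        "google.golang.org/grpc/grpclog"
        testpb "google.golang.org/grpc/interop/grpc_testing"
    )

    func main() {
        // Non-TLS connections now need an explicit grpc.WithInsecure() dial option.
        conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
        if err != nil {
            grpclog.Fatalf("fail to dial: %v", err)
        }
        defer conn.Close()
        tc := testpb.NewTestServiceClient(conn)

        // The generated method wraps grpc.NewClientStream; Send/Recv go through
        // the traced SendMsg/RecvMsg paths shown above.
        stream, err := tc.FullDuplexCall(context.Background())
        if err != nil {
            grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
        }
        if err := stream.CloseSend(); err != nil {
            grpclog.Fatalf("%v.CloseSend() = %v", stream, err)
        }
        if _, err := stream.Recv(); err != io.EOF {
            grpclog.Fatalf("expected io.EOF, got %v", err)
        }
    }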
+ if err != nil { + cs.finish(err) + } + }() if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.traceInfo.tr != nil { + cs.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } if !cs.desc.ClientStreams || cs.desc.ServerStreams { return } @@ -204,6 +260,24 @@ func (cs *clientStream) CloseSend() (err error) { return } +func (cs *clientStream) finish(err error) { + if !cs.tracing { + return + } + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.traceInfo.tr != nil { + if err == nil || err == io.EOF { + cs.traceInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.traceInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.traceInfo.tr.SetError() + } + cs.traceInfo.tr.Finish() + cs.traceInfo.tr = nil + } +} + // ServerStream defines the interface a server stream has to satisfy. type ServerStream interface { // SendHeader sends the header metadata. It should not be called @@ -224,6 +298,13 @@ type serverStream struct { codec Codec statusCode codes.Code statusDesc string + + tracing bool // set to EnableTracing when the serverStream is created. + + mu sync.Mutex // protects traceInfo + // traceInfo.tr is set when the serverStream is created (if EnableTracing is true), + // and is set to nil when the serverStream's finish method is called. + traceInfo traceInfo } func (ss *serverStream) Context() context.Context { @@ -242,7 +323,19 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { return } -func (ss *serverStream) SendMsg(m interface{}) error { +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.mu.Unlock() + } + }() out, err := encode(ss.codec, m, compressionNone) if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) @@ -251,6 +344,18 @@ func (ss *serverStream) SendMsg(m interface{}) error { return ss.t.Write(ss.s, out, &transport.Options{Last: false}) } -func (ss *serverStream) RecvMsg(m interface{}) error { +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.tracing { + ss.mu.Lock() + if err == nil { + ss.traceInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.traceInfo.tr.SetError() + } + ss.mu.Unlock() + } + }() return recv(ss.p, ss.codec, m) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go b/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go deleted file mode 100644 index 3d41c6108f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go +++ /dev/null @@ -1,811 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc_test - -import ( - "fmt" - "io" - "log" - "math" - "net" - "reflect" - "runtime" - "sync" - "syscall" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -var ( - testMetadata = metadata.MD{ - "key1": "value1", - "key2": "value2", - } -) - -type testServer struct { -} - -func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - if _, ok := metadata.FromContext(ctx); ok { - // For testing purpose, returns an error if there is attached metadata. - return nil, grpc.Errorf(codes.DataLoss, "got extra metadata") - } - return new(testpb.Empty), nil -} - -func newPayload(t testpb.PayloadType, size int32) *testpb.Payload { - if size < 0 { - log.Fatalf("Requested a response with invalid length %d", size) - } - body := make([]byte, size) - switch t { - case testpb.PayloadType_COMPRESSABLE: - case testpb.PayloadType_UNCOMPRESSABLE: - log.Fatalf("PayloadType UNCOMPRESSABLE is not supported") - default: - log.Fatalf("Unsupported payload type: %d", t) - } - return &testpb.Payload{ - Type: t.Enum(), - Body: body, - } -} - -func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - md, ok := metadata.FromContext(ctx) - if ok { - if err := grpc.SendHeader(ctx, md); err != nil { - log.Fatalf("grpc.SendHeader(%v, %v) = %v, want %v", ctx, md, err, nil) - } - grpc.SetTrailer(ctx, md) - } - // Simulate some service delay. - time.Sleep(2 * time.Millisecond) - return &testpb.SimpleResponse{ - Payload: newPayload(in.GetResponseType(), in.GetResponseSize()), - }, nil -} - -func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { - if _, ok := metadata.FromContext(stream.Context()); ok { - // For testing purpose, returns an error if there is attached metadata. 
- return grpc.Errorf(codes.DataLoss, "got extra metadata") - } - cs := args.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(args.GetResponseType(), c.GetSize()), - }); err != nil { - return err - } - } - return nil -} - -func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { - var sum int - for { - in, err := stream.Recv() - if err == io.EOF { - return stream.SendAndClose(&testpb.StreamingInputCallResponse{ - AggregatedPayloadSize: proto.Int32(int32(sum)), - }) - } - if err != nil { - return err - } - p := in.GetPayload().GetBody() - sum += len(p) - } -} - -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { - md, ok := metadata.FromContext(stream.Context()) - if ok { - if err := stream.SendHeader(md); err != nil { - log.Fatalf("%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) - } - stream.SetTrailer(md) - } - for { - in, err := stream.Recv() - if err == io.EOF { - // read done. - return nil - } - if err != nil { - return err - } - cs := in.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(in.GetResponseType(), c.GetSize()), - }); err != nil { - return err - } - } - } -} - -func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { - msgBuf := make([]*testpb.StreamingOutputCallRequest, 0) - for { - in, err := stream.Recv() - if err == io.EOF { - // read done. - break - } - if err != nil { - return err - } - msgBuf = append(msgBuf, in) - } - for _, m := range msgBuf { - cs := m.GetResponseParameters() - for _, c := range cs { - if us := c.GetIntervalUs(); us > 0 { - time.Sleep(time.Duration(us) * time.Microsecond) - } - if err := stream.Send(&testpb.StreamingOutputCallResponse{ - Payload: newPayload(m.GetResponseType(), c.GetSize()), - }); err != nil { - return err - } - } - } - return nil -} - -const tlsDir = "testdata/" - -func TestDialTimeout(t *testing.T) { - conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTimeout(time.Millisecond)) - if err == nil { - conn.Close() - } - if err != grpc.ErrClientConnTimeout { - t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout) - } -} - -func TestTLSDialTimeout(t *testing.T) { - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") - if err != nil { - t.Fatalf("Failed to create credentials %v", err) - } - conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTransportCredentials(creds), grpc.WithTimeout(time.Millisecond)) - if err == nil { - conn.Close() - } - if err != grpc.ErrClientConnTimeout { - t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout) - } -} - -func TestReconnectTimeout(t *testing.T) { - lis, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("Failed to listen: %v", err) - } - _, port, err := net.SplitHostPort(lis.Addr().String()) - if err != nil { - t.Fatalf("Failed to parse listener address: %v", err) - } - addr := "localhost:" + port - conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second)) - if err != nil { - t.Fatalf("Failed to dial to the server %q: %v", addr, err) - } - // Close unaccepted connection (i.e., conn). 
- lis.Close() - tc := testpb.NewTestServiceClient(conn) - waitC := make(chan struct{}) - go func() { - defer close(waitC) - argSize := 271828 - respSize := 314159 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - if _, err := tc.UnaryCall(context.Background(), req); err == nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, , want _, non-nil") - } - }() - // Block untill reconnect times out. - <-waitC - if err := conn.Close(); err != grpc.ErrClientConnClosing { - t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing) - } -} - -func unixDialer(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) -} - -type env struct { - network string // The type of network such as tcp, unix, etc. - dialer func(addr string, timeout time.Duration) (net.Conn, error) - security string // The security protocol such as TLS, SSH, etc. -} - -func listTestEnv() []env { - if runtime.GOOS == "windows" { - return []env{env{"tcp", nil, ""}, env{"tcp", nil, "tls"}} - } - return []env{env{"tcp", nil, ""}, env{"tcp", nil, "tls"}, env{"unix", unixDialer, ""}, env{"unix", unixDialer, "tls"}} -} - -func setUp(maxStream uint32, e env) (s *grpc.Server, cc *grpc.ClientConn) { - s = grpc.NewServer(grpc.MaxConcurrentStreams(maxStream)) - la := ":0" - switch e.network { - case "unix": - la = "/tmp/testsock" + fmt.Sprintf("%p", s) - syscall.Unlink(la) - } - lis, err := net.Listen(e.network, la) - if err != nil { - log.Fatalf("Failed to listen: %v", err) - } - testpb.RegisterTestServiceServer(s, &testServer{}) - if e.security == "tls" { - creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") - if err != nil { - log.Fatalf("Failed to generate credentials %v", err) - } - go s.Serve(creds.NewListener(lis)) - } else { - go s.Serve(lis) - } - addr := la - switch e.network { - case "unix": - default: - _, port, err := net.SplitHostPort(lis.Addr().String()) - if err != nil { - log.Fatalf("Failed to parse listener address: %v", err) - } - addr = "localhost:" + port - } - if e.security == "tls" { - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") - if err != nil { - log.Fatalf("Failed to create credentials %v", err) - } - cc, err = grpc.Dial(addr, grpc.WithTransportCredentials(creds), grpc.WithDialer(e.dialer)) - } else { - cc, err = grpc.Dial(addr, grpc.WithDialer(e.dialer)) - } - if err != nil { - log.Fatalf("Dial(%q) = %v", addr, err) - } - return -} - -func tearDown(s *grpc.Server, cc *grpc.ClientConn) { - cc.Close() - s.Stop() -} - -func TestTimeoutOnDeadServer(t *testing.T) { - for _, e := range listTestEnv() { - testTimeoutOnDeadServer(t, e) - } -} - -func testTimeoutOnDeadServer(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - s.Stop() - // Set -1 as the timeout to make sure if transportMonitor gets error - // notification in time the failure path of the 1st invoke of - // ClientConn.wait hits the deadline exceeded error. 
- ctx, _ := context.WithTimeout(context.Background(), -1) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded) - } - cc.Close() -} - -func TestEmptyUnary(t *testing.T) { - for _, e := range listTestEnv() { - testEmptyUnary(t, e) - } -} - -func testEmptyUnary(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}) - if err != nil || !proto.Equal(&testpb.Empty{}, reply) { - t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) - } -} - -func TestFailedEmptyUnary(t *testing.T) { - for _, e := range listTestEnv() { - testFailedEmptyUnary(t, e) - } -} - -func testFailedEmptyUnary(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - ctx := metadata.NewContext(context.Background(), testMetadata) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata")) - } -} - -func TestLargeUnary(t *testing.T) { - for _, e := range listTestEnv() { - testLargeUnary(t, e) - } -} - -func testLargeUnary(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - argSize := 271828 - respSize := 314159 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - reply, err := tc.UnaryCall(context.Background(), req) - if err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - pt := reply.GetPayload().GetType() - ps := len(reply.GetPayload().GetBody()) - if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { - t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) - } -} - -func TestMetadataUnaryRPC(t *testing.T) { - for _, e := range listTestEnv() { - testMetadataUnaryRPC(t, e) - } -} - -func testMetadataUnaryRPC(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - argSize := 2718 - respSize := 314 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - var header, trailer metadata.MD - ctx := metadata.NewContext(context.Background(), testMetadata) - _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)) - if err != nil { - t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) - } - if !reflect.DeepEqual(testMetadata, header) { - t.Fatalf("Received header metadata %v, want %v", header, testMetadata) - } - if !reflect.DeepEqual(testMetadata, trailer) { - t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata) - } -} - -func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) { - argSize := 2718 - respSize := 314 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: 
newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - reply, err := tc.UnaryCall(context.Background(), req) - if err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - pt := reply.GetPayload().GetType() - ps := len(reply.GetPayload().GetBody()) - if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { - t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) - } - wg.Done() -} - -func TestRetry(t *testing.T) { - for _, e := range listTestEnv() { - testRetry(t, e) - } -} - -// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport. -// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy -// and error-prone paths. -func testRetry(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - var wg sync.WaitGroup - wg.Add(1) - go func() { - time.Sleep(1 * time.Second) - // The server shuts down the network connection to make a - // transport error which will be detected by the client side - // code. - s.TestingCloseConns() - wg.Done() - }() - // All these RPCs should succeed eventually. - for i := 0; i < 1000; i++ { - time.Sleep(2 * time.Millisecond) - wg.Add(1) - go performOneRPC(t, tc, &wg) - } - wg.Wait() -} - -func TestRPCTimeout(t *testing.T) { - for _, e := range listTestEnv() { - testRPCTimeout(t, e) - } -} - -// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism. -func testRPCTimeout(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - argSize := 2718 - respSize := 314 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - // Performs 100 RPCs with various timeout values so that - // the RPCs could timeout on different stages of their lifetime. This - // is the best-effort to cover various cases when an rpc gets cancelled. - for i := 1; i <= 100; i++ { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Microsecond) - reply, err := tc.UnaryCall(ctx, req) - if grpc.Code(err) != codes.DeadlineExceeded { - t.Fatalf(`TestService/UnaryCallv(_, _) = %v, %v; want , error code: %d`, reply, err, codes.DeadlineExceeded) - } - } -} - -func TestCancel(t *testing.T) { - for _, e := range listTestEnv() { - testCancel(t, e) - } -} - -func testCancel(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - argSize := 2718 - respSize := 314 - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(int32(respSize)), - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)), - } - ctx, cancel := context.WithCancel(context.Background()) - time.AfterFunc(1*time.Millisecond, cancel) - reply, err := tc.UnaryCall(ctx, req) - if grpc.Code(err) != codes.Canceled { - t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want , error code: %d`, reply, err, codes.Canceled) - } -} - -// The following tests the gRPC streaming RPC implementations. -// TODO(zhaoq): Have better coverage on error cases. 
-var ( - reqSizes = []int{27182, 8, 1828, 45904} - respSizes = []int{31415, 9, 2653, 58979} -) - -func TestPingPong(t *testing.T) { - for _, e := range listTestEnv() { - testPingPong(t, e) - } -} - -func testPingPong(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - stream, err := tc.FullDuplexCall(context.Background()) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - var index int - for index < len(reqSizes) { - respParam := []*testpb.ResponseParameters{ - { - Size: proto.Int32(int32(respSizes[index])), - }, - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseParameters: respParam, - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])), - } - if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) - } - reply, err := stream.Recv() - if err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } - pt := reply.GetPayload().GetType() - if pt != testpb.PayloadType_COMPRESSABLE { - t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) - } - size := len(reply.GetPayload().GetBody()) - if size != int(respSizes[index]) { - t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) - } - index++ - } - if err := stream.CloseSend(); err != nil { - t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) - } - if _, err := stream.Recv(); err != io.EOF { - t.Fatalf("%v failed to complele the ping pong test: %v", stream, err) - } -} - -func TestMetadataStreamingRPC(t *testing.T) { - for _, e := range listTestEnv() { - testMetadataStreamingRPC(t, e) - } -} - -func testMetadataStreamingRPC(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - ctx := metadata.NewContext(context.Background(), testMetadata) - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - go func() { - headerMD, err := stream.Header() - if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { - t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) - } - // test the cached value. - headerMD, err = stream.Header() - if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { - t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) - } - var index int - for index < len(reqSizes) { - respParam := []*testpb.ResponseParameters{ - { - Size: proto.Int32(int32(respSizes[index])), - }, - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseParameters: respParam, - Payload: newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])), - } - if err := stream.Send(req); err != nil { - t.Errorf("%v.Send(%v) = %v, want ", stream, req, err) - return - } - index++ - } - // Tell the server we're done sending args. 
- stream.CloseSend() - }() - for { - if _, err := stream.Recv(); err != nil { - break - } - } - trailerMD := stream.Trailer() - if !reflect.DeepEqual(testMetadata, trailerMD) { - t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata) - } -} - -func TestServerStreaming(t *testing.T) { - for _, e := range listTestEnv() { - testServerStreaming(t, e) - } -} - -func testServerStreaming(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - respParam := make([]*testpb.ResponseParameters, len(respSizes)) - for i, s := range respSizes { - respParam[i] = &testpb.ResponseParameters{ - Size: proto.Int32(int32(s)), - } - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseParameters: respParam, - } - stream, err := tc.StreamingOutputCall(context.Background(), req) - if err != nil { - t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) - } - var rpcStatus error - var respCnt int - var index int - for { - reply, err := stream.Recv() - if err != nil { - rpcStatus = err - break - } - pt := reply.GetPayload().GetType() - if pt != testpb.PayloadType_COMPRESSABLE { - t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) - } - size := len(reply.GetPayload().GetBody()) - if size != int(respSizes[index]) { - t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) - } - index++ - respCnt++ - } - if rpcStatus != io.EOF { - t.Fatalf("Failed to finish the server streaming rpc: %v, want ", err) - } - if respCnt != len(respSizes) { - t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) - } -} - -func TestFailedServerStreaming(t *testing.T) { - for _, e := range listTestEnv() { - testFailedServerStreaming(t, e) - } -} - -func testFailedServerStreaming(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - respParam := make([]*testpb.ResponseParameters, len(respSizes)) - for i, s := range respSizes { - respParam[i] = &testpb.ResponseParameters{ - Size: proto.Int32(int32(s)), - } - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseParameters: respParam, - } - ctx := metadata.NewContext(context.Background(), testMetadata) - stream, err := tc.StreamingOutputCall(ctx, req) - if err != nil { - t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) - } - if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") { - t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata")) - } -} - -func TestClientStreaming(t *testing.T) { - for _, e := range listTestEnv() { - testClientStreaming(t, e) - } -} - -func testClientStreaming(t *testing.T, e env) { - s, cc := setUp(math.MaxUint32, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - stream, err := tc.StreamingInputCall(context.Background()) - if err != nil { - t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) - } - var sum int - for _, s := range reqSizes { - pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s)) - req := &testpb.StreamingInputCallRequest{ - Payload: pl, - } - if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) - } - sum += s - } - reply, err := stream.CloseAndRecv() - if err != nil { - t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) - } - if 
reply.GetAggregatedPayloadSize() != int32(sum) { - t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) - } -} - -func TestExceedMaxStreamsLimit(t *testing.T) { - for _, e := range listTestEnv() { - testExceedMaxStreamsLimit(t, e) - } -} - -func testExceedMaxStreamsLimit(t *testing.T, e env) { - // Only allows 1 live stream per server transport. - s, cc := setUp(1, e) - tc := testpb.NewTestServiceClient(cc) - defer tearDown(s, cc) - var err error - for { - time.Sleep(2 * time.Millisecond) - _, err = tc.StreamingInputCall(context.Background()) - // Loop until the settings of max concurrent streams is - // received by the client. - if err != nil { - break - } - } - if grpc.Code(err) != codes.Unavailable { - t.Fatalf("got %v, want error code %d", err, codes.Unavailable) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go index 261ba1cc27..bd492fef47 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing/test.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-go. -// source: src/google.golang.org/grpc/test/grpc_testing/test.proto +// source: test.proto // DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - src/google.golang.org/grpc/test/grpc_testing/test.proto + test.proto It has these top-level messages: Empty @@ -539,9 +539,9 @@ func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Empty) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(TestServiceServer).EmptyCall(ctx, in) @@ -551,9 +551,9 @@ func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, codec return out, nil } -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(SimpleRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(TestServiceServer).UnaryCall(ctx, in) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem deleted file mode 100644 index 999da89775..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw -NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh -MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 -Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 -cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv 
-xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ -QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ -AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz -fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y -5ukK//dCPAzA11YuX2rnex0JhuTQfcI= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key deleted file mode 100644 index ed00cd500e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ -bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM -BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB -AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I -ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg -omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb -ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB -9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd -MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v -IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc -USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo -VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a -RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem deleted file mode 100644 index 8e582e571f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 -MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl -c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs -JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO -RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 -3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL -BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 -b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ -KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS -wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e -aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/trace.go b/Godeps/_workspace/src/google.golang.org/grpc/trace.go new file mode 100644 index 0000000000..9b88444f6e --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/trace.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing = true + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) 
+} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go index 306b585182..6b5201e7a6 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go @@ -37,7 +37,7 @@ import ( "fmt" "sync" - "github.com/bradfitz/http2" + "golang.org/x/net/http2" ) const ( @@ -61,8 +61,8 @@ func (windowUpdate) isItem() bool { } type settings struct { - ack bool - setting []http2.Setting + ack bool + ss []http2.Setting } func (settings) isItem() bool { @@ -104,8 +104,14 @@ type quotaPool struct { // newQuotaPool creates a quotaPool which has quota q available to consume. func newQuotaPool(q int) *quotaPool { - qb := &quotaPool{c: make(chan int, 1)} - qb.c <- q + qb := &quotaPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } return qb } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go index 98cfb8035b..526f5115a5 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go @@ -37,25 +37,28 @@ import ( "bytes" "errors" "io" - "log" "math" "net" + "strings" "sync" "time" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - target string // server name/addr - conn net.Conn // underlying communication channel - nextID uint32 // the next stream ID to be used + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan @@ -79,6 +82,8 @@ type http2Client struct { fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + // The scheme used: https if TLS is on, http otherwise. scheme string @@ -89,7 +94,7 @@ type http2Client struct { state transportState // the state of underlying connection activeStreams map[uint32]*Stream // The max number of concurrent streams - maxStreams uint32 + maxStreams int // the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32 } @@ -111,6 +116,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) } + var authInfo credentials.AuthInfo for _, c := range opts.AuthOptions { if ccreds, ok := c.(credentials.TransportAuthenticator); ok { scheme = "https" @@ -121,7 +127,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if timeout > 0 { timeout -= time.Since(startT) } - conn, connErr = ccreds.Handshake(addr, conn, timeout) + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) break } } @@ -156,10 +162,16 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e return nil, ConnectionErrorf("transport: %v", err) } } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } var buf bytes.Buffer t := &http2Client{ - target: addr, - conn: conn, + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, // The client initiated stream id is odd starting from 1. nextID: 1, writableChan: make(chan int, 1), @@ -174,8 +186,8 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), - maxStreams: math.MaxUint32, authCreds: opts.AuthOptions, + maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } go t.controller() @@ -226,9 +238,26 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ContextErr(context.DeadlineExceeded) } } + // Attach Auth info if there is any. + if t.authInfo != nil { + ctx = credentials.NewContext(ctx, t.authInfo) + } authData := make(map[string]string) for _, c := range t.authCreds { - data, err := c.GetRequestMetadata(ctx) + // Construct URI required to get auth request metadata. + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. + if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) if err != nil { return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) } @@ -236,7 +265,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea authData[k] = v } } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + checkStreamsQuota := t.streamsQuota != nil + t.mu.Unlock() + if checkStreamsQuota { + sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + } if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { + // t.streamsQuota will be updated when t.CloseStream is invoked. 
return nil, err } t.mu.Lock() @@ -244,11 +291,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.mu.Unlock() return nil, ErrConnClosing } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.writableChan <- 0 - return nil, StreamErrorf(codes.Unavailable, "transport: failed to create new stream because the limit has been reached.") - } s := t.newStream(ctx, callHdr) t.activeStreams[s.id] = s t.mu.Unlock() @@ -261,7 +303,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } @@ -275,7 +319,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } } first := true @@ -316,9 +362,16 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { + var updateStreams bool t.mu.Lock() + if t.streamsQuota != nil { + updateStreams = true + } delete(t.activeStreams, s.id) t.mu.Unlock() + if updateStreams { + t.streamsQuota.add(1) + } s.mu.Lock() if q := s.fc.restoreConn(); q > 0 { t.controlBuf.put(&windowUpdate{0, q}) @@ -503,30 +556,46 @@ func (t *http2Client) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } - s.state = streamDone + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } s.statusCode = codes.Internal - s.statusDesc = err.Error() + s.statusDesc = "server closed the stream without sending trailers" s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { @@ -540,9 +609,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { return } s.state = streamDone + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { - log.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) @@ -552,24 +625,13 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(s.ID); ok { - t.mu.Lock() - defer t.mu.Unlock() - switch s.ID { - case http2.SettingMaxConcurrentStreams: - t.maxStreams = v - case http2.SettingInitialWindowSize: - for _, s := range t.activeStreams { - // Adjust the sending quota for each s. - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - } - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Client) handlePing(f *http2.PingFrame) { @@ -577,7 +639,7 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented" + // TODO(zhaoq): GoAwayFrame handler to be implemented } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -692,7 +754,40 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - log.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + reset := t.streamsQuota != nil + if !reset { + t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) + } + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + if reset { + t.streamsQuota.reset(int(s.Val) - ms) + } + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. 
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() } } } @@ -712,8 +807,9 @@ func (t *http2Client) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) + t.framer.writeSettings(true, i.ss...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) @@ -724,7 +820,7 @@ func (t *http2Client) controller() { // meaningful content when this is actually in use. t.framer.writePing(true, i.ack, [8]byte{}) default: - log.Printf("transport: http2Client.controller got unexpected item type %v\n", i) + grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) } t.writableChan <- 0 continue @@ -748,6 +844,6 @@ func (t *http2Client) notifyError(err error) { if t.state == reachable { t.state = unreachable close(t.errorChan) - log.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) + grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) } } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go index 3dfc49e001..89371c23aa 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go @@ -37,16 +37,18 @@ import ( "bytes" "errors" "io" - "log" "math" "net" "strconv" "sync" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) @@ -57,7 +59,8 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { conn net.Conn - maxStreamID uint32 // max stream ID ever seen + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan // and releases it by receiving from writableChan. @@ -78,6 +81,8 @@ type http2Server struct { fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool + // tracing indicates whether tracing is on for this http2Server transport. + tracing bool mu sync.Mutex // guard the following state transportState @@ -88,11 +93,9 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. -func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) { +func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo, tracing bool) (_ ServerTransport, err error) { framer := newFramer(conn) // Send initial settings as connection preface to client. - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. var settings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. 
@@ -116,6 +119,7 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er var buf bytes.Buffer t := &http2Server{ conn: conn, + authInfo: authInfo, framer: framer, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), @@ -123,6 +127,7 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er controlBuf: newRecvBuffer(), fc: &inFlow{limit: initialConnWindowSize}, sendQuotaPool: newQuotaPool(defaultWindowSize), + tracing: tracing, state: reachable, writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), @@ -149,7 +154,7 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header return nil } if err != nil { - log.Printf("transport: http2Server.operateHeader found %v", err) + grpclog.Printf("transport: http2Server.operateHeader found %v", err) if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } @@ -183,6 +188,10 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } + // Attach Auth info if there is any. + if t.authInfo != nil { + s.ctx = credentials.NewContext(s.ctx, t.authInfo) + } // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). @@ -197,7 +206,10 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header recv: s.buf, } s.method = hDec.state.method - + if t.tracing { + s.tr = trace.New("grpc.Recv."+MethodFamily(s.method), s.method) + s.ctx = trace.NewContext(s.ctx, s.tr) + } wg.Add(1) go func() { handle(s) @@ -212,25 +224,25 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { - log.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) t.Close() return } if !bytes.Equal(preface, clientPreface) { - log.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) t.Close() return } frame, err := t.framer.readFrame() if err != nil { - log.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } sf, ok := frame.(*http2.SettingsFrame) if !ok { - log.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) t.Close() return } @@ -251,7 +263,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { id := frame.Header().StreamID if id%2 != 1 || id <= t.maxStreamID { // illegal gRPC stream id. 
- log.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) t.Close() break } @@ -281,8 +293,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { t.handlePing(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + break default: - log.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -322,22 +336,24 @@ func (t *http2Server) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - log.Printf("transport: http2Server %v", err) - t.Close() + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. s.mu.Lock() @@ -358,11 +374,6 @@ func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { if !ok { return } - s.mu.Lock() - // Sets the stream state to avoid sending RSTStreamFrame to client - // unnecessarily. - s.state = streamDone - s.mu.Unlock() t.closeStream(s) } @@ -370,18 +381,13 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(http2.SettingInitialWindowSize); ok { - t.mu.Lock() - defer t.mu.Unlock() - for _, s := range t.activeStreams { - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Server) handlePing(f *http2.PingFrame) { @@ -448,7 +454,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, false); err != nil { return err @@ -481,7 +489,9 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) // Attach the trailer metadata. 
for k, v := range s.trailer { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, true); err != nil { t.Close() @@ -587,6 +597,20 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Server) controller() { @@ -602,8 +626,9 @@ func (t *http2Server) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) + t.framer.writeSettings(true, i.ss...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) @@ -614,7 +639,7 @@ func (t *http2Server) controller() { // meaningful content when this is actually in use. t.framer.writePing(true, i.ack, [8]byte{}) default: - log.Printf("transport: http2Server.controller got unexpected item type %v\n", i) + grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) } t.writableChan <- 0 continue @@ -671,3 +696,7 @@ func (t *http2Server) closeStream(s *Stream) { // other goroutines. s.cancel() } + +func (t *http2Server) RemoteAddr() net.Addr { + return t.conn.RemoteAddr() +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go index fb67a0d28b..c41892d080 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go @@ -37,19 +37,22 @@ import ( "bufio" "fmt" "io" - "log" "net" "strconv" + "strings" "sync/atomic" "time" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) const ( + // The primary user agent + primaryUA = "grpc-go/0.11" // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -59,32 +62,30 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) + clientPreface = []byte(http2.ClientPreface) + http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } ) -var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.Internal, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, -} - -var statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, // pick an arbitrary one which is matched. - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, -} - // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { @@ -97,7 +98,7 @@ type decodeState struct { timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string]string + mdata map[string][]string } // An hpackDecoder decodes HTTP2 headers which may span multiple frames. @@ -128,8 +129,7 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "te", - "user-agent": + "te": return true default: return false @@ -140,6 +140,14 @@ func newHPACKDecoder() *hpackDecoder { d := &hpackDecoder{} d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { switch f.Name { + case "content-type": + // TODO(zhaoq): Tentatively disable the check until a bug is fixed. + /* + if !strings.Contains(f.Value, "application/grpc") { + d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") + return + } + */ case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { @@ -161,15 +169,24 @@ func newHPACKDecoder() *hpackDecoder { d.state.method = f.Value default: if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. 
+ f.Value = f.Value[:i] + } if d.state.mdata == nil { - d.state.mdata = make(map[string]string) + d.state.mdata = make(map[string][]string) } k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) if err != nil { - log.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) return } - d.state.mdata[k] = v + d.state.mdata[k] = append(d.state.mdata[k], v) } } }) diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem deleted file mode 100644 index 999da89775..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIzCCAYwCCQCFTbF7XNSvvjANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzE3MjMxNzUxWhcNMjQw -NzE0MjMxNzUxWjBWMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh -MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0 -Y2EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMBA3wVeTGHZR1Rye/i+J8a2 -cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBlLoNY0lSv -xDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JAHUGodjx+ -QAi6yCAurUZGvYXGgZSBAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAQoQVD8bwdtWJ -AniGBwcCfqYyH+/KpA10AcebJVVTyYbYvI9Q8d6RSVu4PZy9OALHR/QrWBdYTAyz -fNAmc2cmdkSRJzjhIaOstnQy1J+Fk0T9XyvQtq499yFbq9xogUVlEGH62xP6vH0Y -5ukK//dCPAzA11YuX2rnex0JhuTQfcI= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key deleted file mode 100644 index ed00cd500e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDhwxUnKCwlSaWAwzOB2LSHVegJHv7DDWminTg4wzLLsf+LQ8nZ -bpjfn5vgIzxCuRh4Rp9QYM5FhfrJX9wcYawP/HTbJ7p7LVQO2QYAP+akMTHxgKuM -BzVV++3wWToKfVZUjFX8nfTfGMGwWAHJDnlEGnU4tl9UujoCV4ENJtzFoQIDAQAB -AoGAJ+6hpzNr24yTQZtFWQpDpEyFplddKJMOxDya3S9ppK3vTWrIITV2xNcucw7I -ceTbdyrGsyjsU0/HdCcIf9ym2jfmGLUwmyhltKVw0QYcFB0XLkc0nI5YvEYoeVDg -omZIXn1E3EW+sSIWSbkMu9bY2kstKXR2UZmMgWDtmBEPMaECQQD6yT4TAZM5hGBb -ciBKgMUP6PwOhPhOMPIvijO50Aiu6iuCV88l1QIy38gWVhxjNrq6P346j4IBg+kB -9alwpCODAkEA5nSnm9k6ykYeQWNS0fNWiRinCdl23A7usDGSuKKlm019iomJ/Rgd -MKDOp0q/2OostbteOWM2MRFf4jMH3wyVCwJAfAdjJ8szoNKTRSagSbh9vWygnB2v -IByc6l4TTuZQJRGzCveafz9lovuB3WohCABdQRd9ukCXL2CpsEpqzkafOQJAJUjc -USedDlq3zGZwYM1Yw8d8RuirBUFZNqJelYai+nRYClDkRVFgb5yksoYycbq5TxGo -VeqKOvgPpj4RWPHlLwJAGUMk3bqT91xBUCnLRs/vfoCpHpg6eywQTBDAV6xkyz4a -RH3I7/+yj3ZxR2JoWHgUwZ7lZk1VnhffFye7SBXyag== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem b/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem deleted file mode 100644 index 8e582e571f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5 -MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl 
-c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs -JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO -RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30 -3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL -BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6 -b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ -KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS -wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e -aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go index 4b52d4a2cd..963ae8df39 100644 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go @@ -43,15 +43,30 @@ import ( "fmt" "io" "net" + "strings" "sync" "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" ) +// MethodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func MethodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { @@ -169,10 +184,9 @@ type Stream struct { ctx context.Context cancel context.CancelFunc // method records the associated RPC method of the stream. - method string - buf *recvBuffer - dec io.Reader - + method string + buf *recvBuffer + dec io.Reader fc *inFlow recvQuota uint32 // The accumulated inbound quota pending for window update. @@ -199,6 +213,8 @@ type Stream struct { // the status received from the server. statusCode codes.Code statusDesc string + // tracing information + tr trace.Trace } // Header acquires the key-value pairs of header metadata once it @@ -233,6 +249,11 @@ func (s *Stream) Context() context.Context { return s.ctx } +// Trace returns the trace.Trace of the stream. +func (s *Stream) Trace() trace.Trace { + return s.tr +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -309,15 +330,20 @@ const ( // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams) +func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo, tracing bool) (ServerTransport, error) { + return newHTTP2Server(conn, maxStreams, authInfo, tracing) } // ConnectOptions covers all relevant options for dialing a server. type ConnectOptions struct { - Dialer func(string, time.Duration) (net.Conn, error) + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(string, time.Duration) (net.Conn, error) + // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. 
AuthOptions []credentials.Credentials - Timeout time.Duration + // Timeout specifies the timeout for dialing a client connection. + Timeout time.Duration } // NewClientTransport establishes the transport with the required ConnectOptions @@ -386,6 +412,8 @@ type ServerTransport interface { // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr } // StreamErrorf creates an StreamError with the specified error code and description. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go deleted file mode 100644 index ca423c9160..0000000000 --- a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go +++ /dev/null @@ -1,590 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bytes" - "io" - "log" - "math" - "net" - "reflect" - "strconv" - "sync" - "testing" - "time" - - "github.com/bradfitz/http2" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" -) - -type server struct { - lis net.Listener - port string - // channel to signal server is ready to serve. 
- readyChan chan bool - mu sync.Mutex - conns map[ServerTransport]bool -} - -var ( - tlsDir = "testdata/" - expectedRequest = []byte("ping") - expectedResponse = []byte("pong") - expectedRequestLarge = make([]byte, initialWindowSize*2) - expectedResponseLarge = make([]byte, initialWindowSize*2) -) - -type testStreamHandler struct { - t ServerTransport -} - -type hType int - -const ( - normal hType = iota - suspended - misbehaved -) - -func (h *testStreamHandler) handleStream(s *Stream) { - req := expectedRequest - resp := expectedResponse - if s.Method() == "foo.Large" { - req = expectedRequestLarge - resp = expectedResponseLarge - } - p := make([]byte, len(req)) - _, err := io.ReadFull(s, p) - if err != nil || !bytes.Equal(p, req) { - if err == ErrConnClosing { - return - } - log.Fatalf("handleStream got error: %v, want ; result: %v, want %v", err, p, req) - } - // send a response back to the client. - h.t.Write(s, resp, &Options{}) - // send the trailer to end the stream. - h.t.WriteStatus(s, codes.OK, "") -} - -// handleStreamSuspension blocks until s.ctx is canceled. -func (h *testStreamHandler) handleStreamSuspension(s *Stream) { - <-s.ctx.Done() -} - -func (h *testStreamHandler) handleStreamMisbehave(s *Stream) { - conn, ok := s.ServerTransport().(*http2Server) - if !ok { - log.Fatalf("Failed to convert %v to *http2Server", s.ServerTransport()) - } - size := 1 - if s.Method() == "foo.MaxFrame" { - size = http2MaxFrameLen - } - // Drain the client side stream flow control window. - var sent int - for sent <= initialWindowSize { - <-conn.writableChan - if err := conn.framer.writeData(true, s.id, false, make([]byte, size)); err != nil { - conn.writableChan <- 0 - break - } - conn.writableChan <- 0 - sent += size - } -} - -// start starts server. Other goroutines should block on s.readyChan for futher operations. 
-func (s *server) start(useTLS bool, port int, maxStreams uint32, ht hType) { - var err error - if port == 0 { - s.lis, err = net.Listen("tcp", ":0") - } else { - s.lis, err = net.Listen("tcp", ":"+strconv.Itoa(port)) - } - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - if useTLS { - creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") - if err != nil { - log.Fatalf("Failed to generate credentials %v", err) - } - s.lis = creds.NewListener(s.lis) - } - _, p, err := net.SplitHostPort(s.lis.Addr().String()) - if err != nil { - log.Fatalf("failed to parse listener address: %v", err) - } - s.port = p - s.conns = make(map[ServerTransport]bool) - if s.readyChan != nil { - close(s.readyChan) - } - for { - conn, err := s.lis.Accept() - if err != nil { - return - } - t, err := NewServerTransport("http2", conn, maxStreams) - if err != nil { - return - } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - t.Close() - return - } - s.conns[t] = true - s.mu.Unlock() - h := &testStreamHandler{t} - switch ht { - case suspended: - go t.HandleStreams(h.handleStreamSuspension) - case misbehaved: - go t.HandleStreams(h.handleStreamMisbehave) - default: - go t.HandleStreams(h.handleStream) - } - } -} - -func (s *server) wait(t *testing.T, timeout time.Duration) { - select { - case <-s.readyChan: - case <-time.After(timeout): - t.Fatalf("Timed out after %v waiting for server to be ready", timeout) - } -} - -func (s *server) stop() { - s.lis.Close() - s.mu.Lock() - for c := range s.conns { - c.Close() - } - s.conns = nil - s.mu.Unlock() -} - -func setUp(t *testing.T, useTLS bool, port int, maxStreams uint32, ht hType) (*server, ClientTransport) { - server := &server{readyChan: make(chan bool)} - go server.start(useTLS, port, maxStreams, ht) - server.wait(t, 2*time.Second) - addr := "localhost:" + server.port - var ( - ct ClientTransport - connErr error - ) - if useTLS { - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") - if err != nil { - t.Fatalf("Failed to create credentials %v", err) - } - dopts := ConnectOptions{ - AuthOptions: []credentials.Credentials{creds}, - } - ct, connErr = NewClientTransport(addr, &dopts) - } else { - ct, connErr = NewClientTransport(addr, &ConnectOptions{}) - } - if connErr != nil { - t.Fatalf("failed to create transport: %v", connErr) - } - return server, ct -} - -func TestClientSendAndReceive(t *testing.T) { - server, ct := setUp(t, true, 0, math.MaxUint32, normal) - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo.Small", - } - s1, err1 := ct.NewStream(context.Background(), callHdr) - if err1 != nil { - t.Fatalf("failed to open stream: %v", err1) - } - if s1.id != 1 { - t.Fatalf("wrong stream id: %d", s1.id) - } - s2, err2 := ct.NewStream(context.Background(), callHdr) - if err2 != nil { - t.Fatalf("failed to open stream: %v", err2) - } - if s2.id != 3 { - t.Fatalf("wrong stream id: %d", s2.id) - } - opts := Options{ - Last: true, - Delay: false, - } - if err := ct.Write(s1, expectedRequest, &opts); err != nil { - t.Fatalf("failed to send data: %v", err) - } - p := make([]byte, len(expectedResponse)) - _, recvErr := io.ReadFull(s1, p) - if recvErr != nil || !bytes.Equal(p, expectedResponse) { - t.Fatalf("Error: %v, want ; Result: %v, want %v", recvErr, p, expectedResponse) - } - _, recvErr = io.ReadFull(s1, p) - if recvErr != io.EOF { - t.Fatalf("Error: %v; want ", recvErr) - } - ct.Close() - server.stop() -} - -func TestClientErrorNotify(t *testing.T) { - server, ct := setUp(t, 
true, 0, math.MaxUint32, normal) - go server.stop() - // ct.reader should detect the error and activate ct.Error(). - <-ct.Error() - ct.Close() -} - -func performOneRPC(ct ClientTransport) { - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo.Small", - } - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - return - } - opts := Options{ - Last: true, - Delay: false, - } - if err := ct.Write(s, expectedRequest, &opts); err == nil { - time.Sleep(5 * time.Millisecond) - // The following s.Recv()'s could error out because the - // underlying transport is gone. - // - // Read response - p := make([]byte, len(expectedResponse)) - io.ReadFull(s, p) - // Read io.EOF - io.ReadFull(s, p) - } -} - -func TestClientMix(t *testing.T) { - s, ct := setUp(t, true, 0, math.MaxUint32, normal) - go func(s *server) { - time.Sleep(5 * time.Second) - s.stop() - }(s) - go func(ct ClientTransport) { - <-ct.Error() - ct.Close() - }(ct) - for i := 0; i < 1000; i++ { - time.Sleep(10 * time.Millisecond) - go performOneRPC(ct) - } -} - -func TestExceedMaxStreamsLimit(t *testing.T) { - server, ct := setUp(t, true, 0, 1, normal) - defer func() { - ct.Close() - server.stop() - }() - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo.Small", - } - // Creates the 1st stream and keep it alive. - _, err1 := ct.NewStream(context.Background(), callHdr) - if err1 != nil { - t.Fatalf("failed to open stream: %v", err1) - } - // Creates the 2nd stream. It has chance to succeed when the settings - // frame from the server has not received at the client. - s, err2 := ct.NewStream(context.Background(), callHdr) - if err2 != nil { - se, ok := err2.(StreamError) - if !ok { - t.Fatalf("Received unexpected error %v", err2) - } - if se.Code != codes.Unavailable { - t.Fatalf("Got error code: %d, want: %d", se.Code, codes.Unavailable) - } - return - } - // If the 2nd stream is created successfully, sends the request. - if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil { - t.Fatalf("failed to send data: %v", err) - } - // The 2nd stream was rejected by the server via a reset. - p := make([]byte, len(expectedResponse)) - _, recvErr := io.ReadFull(s, p) - if recvErr != io.EOF || s.StatusCode() != codes.Unavailable { - t.Fatalf("Error: %v, StatusCode: %d; want , %d", recvErr, s.StatusCode(), codes.Unavailable) - } - // Server's setting has been received. From now on, new stream will be rejected instantly. 
- _, err3 := ct.NewStream(context.Background(), callHdr) - if err3 == nil { - t.Fatalf("Received unexpected , want an error with code %d", codes.Unavailable) - } - if se, ok := err3.(StreamError); !ok || se.Code != codes.Unavailable { - t.Fatalf("Got: %v, want a StreamError with error code %d", err3, codes.Unavailable) - } -} - -func TestLargeMessage(t *testing.T) { - server, ct := setUp(t, true, 0, math.MaxUint32, normal) - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo.Large", - } - var wg sync.WaitGroup - for i := 0; i < 2; i++ { - wg.Add(1) - go func() { - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - t.Fatalf("failed to open stream: %v", err) - } - if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil { - t.Fatalf("failed to send data: %v", err) - } - p := make([]byte, len(expectedResponseLarge)) - _, recvErr := io.ReadFull(s, p) - if recvErr != nil || !bytes.Equal(p, expectedResponseLarge) { - t.Fatalf("Error: %v, want ; Result len: %d, want len %d", recvErr, len(p), len(expectedResponseLarge)) - } - _, recvErr = io.ReadFull(s, p) - if recvErr != io.EOF { - t.Fatalf("Error: %v; want ", recvErr) - } - wg.Done() - }() - } - wg.Wait() - ct.Close() - server.stop() -} - -func TestLargeMessageSuspension(t *testing.T) { - server, ct := setUp(t, true, 0, math.MaxUint32, suspended) - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo.Large", - } - // Set a long enough timeout for writing a large message out. - ctx, _ := context.WithTimeout(context.Background(), time.Second) - s, err := ct.NewStream(ctx, callHdr) - if err != nil { - t.Fatalf("failed to open stream: %v", err) - } - // Write should not be done successfully due to flow control. - err = ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}) - expectedErr := StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded) - if err == nil || err != expectedErr { - t.Fatalf("Write got %v, want %v", err, expectedErr) - } - ct.Close() - server.stop() -} - -func TestServerWithMisbehavedClient(t *testing.T) { - server, ct := setUp(t, true, 0, math.MaxUint32, suspended) - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo", - } - var sc *http2Server - // Wait until the server transport is setup. - for { - server.mu.Lock() - if len(server.conns) == 0 { - server.mu.Unlock() - time.Sleep(time.Millisecond) - continue - } - for k, _ := range server.conns { - var ok bool - sc, ok = k.(*http2Server) - if !ok { - t.Fatalf("Failed to convert %v to *http2Server", k) - } - } - server.mu.Unlock() - break - } - cc, ok := ct.(*http2Client) - if !ok { - t.Fatalf("Failed to convert %v to *http2Client", ct) - } - // Test server behavior for violation of stream flow control window size restriction. - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - t.Fatalf("Failed to open stream: %v", err) - } - var sent int - // Drain the stream flow control window - <-cc.writableChan - if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { - t.Fatalf("Failed to write data: %v", err) - } - cc.writableChan <- 0 - sent += http2MaxFrameLen - // Wait until the server creates the corresponding stream and receive some data. 
- var ss *Stream - for { - time.Sleep(time.Millisecond) - sc.mu.Lock() - if len(sc.activeStreams) == 0 { - sc.mu.Unlock() - continue - } - ss = sc.activeStreams[s.id] - sc.mu.Unlock() - ss.fc.mu.Lock() - if ss.fc.pendingData > 0 { - ss.fc.mu.Unlock() - break - } - ss.fc.mu.Unlock() - } - if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != http2MaxFrameLen || sc.fc.pendingUpdate != 0 { - t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, http2MaxFrameLen, 0) - } - // Keep sending until the server inbound window is drained for that stream. - for sent <= initialWindowSize { - <-cc.writableChan - if err = cc.framer.writeData(true, s.id, false, make([]byte, 1)); err != nil { - t.Fatalf("Failed to write data: %v", err) - } - cc.writableChan <- 0 - sent += 1 - } - // Server sent a resetStream for s already. - code := http2RSTErrConvTab[http2.ErrCodeFlowControl] - if _, err := io.ReadFull(s, make([]byte, 1)); err != io.EOF || s.statusCode != code { - t.Fatalf("%v got err %v with statusCode %d, want err with statusCode %d", s, err, s.statusCode, code) - } - - if ss.fc.pendingData != 0 || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate != initialWindowSize { - t.Fatalf("Server mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, initialWindowSize) - } - ct.CloseStream(s, nil) - // Test server behavior for violation of connection flow control window size restriction. - // - // Keep creating new streams until the connection window is drained on the server and - // the server tears down the connection. - for { - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - // The server tears down the connection. - break - } - <-cc.writableChan - cc.framer.writeData(true, s.id, true, make([]byte, http2MaxFrameLen)) - cc.writableChan <- 0 - } - ct.Close() - server.stop() -} - -func TestClientWithMisbehavedServer(t *testing.T) { - server, ct := setUp(t, true, 0, math.MaxUint32, misbehaved) - callHdr := &CallHdr{ - Host: "localhost", - Method: "foo", - } - conn, ok := ct.(*http2Client) - if !ok { - t.Fatalf("Failed to convert %v to *http2Client", ct) - } - // Test the logic for the violation of stream flow control window size restriction. - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - t.Fatalf("Failed to open stream: %v", err) - } - if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil { - t.Fatalf("Failed to write: %v", err) - } - // Read without window update. 
- for { - p := make([]byte, http2MaxFrameLen) - if _, err = s.dec.Read(p); err != nil { - break - } - } - if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 { - t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0) - } - if err != io.EOF || s.statusCode != codes.Internal { - t.Fatalf("Got err %v and the status code %d, want and the code %d", err, s.statusCode, codes.Internal) - } - conn.CloseStream(s, err) - if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize { - t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize) - } - // Test the logic for the violation of the connection flow control window size restriction. - // - // Generate enough streams to drain the connection window. - callHdr = &CallHdr{ - Host: "localhost", - Method: "foo.MaxFrame", - } - for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ { - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - t.Fatalf("Failed to open stream: %v", err) - } - if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil { - break - } - } - // http2Client.errChan is closed due to connection flow control window size violation. - <-conn.Error() - ct.Close() - server.stop() -} - -func TestStreamContext(t *testing.T) { - expectedStream := Stream{} - ctx := newContextWithStream(context.Background(), &expectedStream) - s, ok := StreamFromContext(ctx) - if !ok || !reflect.DeepEqual(expectedStream, *s) { - t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, *s, ok, expectedStream) - } -} diff --git a/cmd/notary-signer/main.go b/cmd/notary-signer/main.go index ef89f59ac1..b4d1c227fb 100644 --- a/cmd/notary-signer/main.go +++ b/cmd/notary-signer/main.go @@ -13,11 +13,12 @@ import ( "os" "path/filepath" "strings" + "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" - _ "github.com/docker/distribution/health" + "github.com/docker/distribution/health" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/signer" "github.com/docker/notary/signer/api" @@ -147,6 +148,10 @@ func main() { if err != nil { log.Fatalf("failed to create a new keydbstore: %v", err) } + + health.RegisterPeriodicFunc( + "DB operational", keyStore.HealthCheck, time.Second*60) + cryptoService := cryptoservice.NewCryptoService("", keyStore) cryptoServices[data.ED25519Key] = cryptoService @@ -156,10 +161,6 @@ func main() { kms := &api.KeyManagementServer{CryptoServices: cryptoServices} ss := &api.SignerServer{CryptoServices: cryptoServices} - grpcServer := grpc.NewServer() - pb.RegisterKeyManagementServer(grpcServer, kms) - pb.RegisterSignerServer(grpcServer, ss) - rpcAddr := viper.GetString("server.grpc_addr") lis, err := net.Listen("tcp", rpcAddr) if err != nil { @@ -169,7 +170,13 @@ func main() { if err != nil { log.Fatalf("failed to generate credentials %v", err) } - go grpcServer.Serve(creds.NewListener(lis)) + opts := []grpc.ServerOption{grpc.Creds(creds)} + grpcServer := grpc.NewServer(opts...) 
+ + pb.RegisterKeyManagementServer(grpcServer, kms) + pb.RegisterSignerServer(grpcServer, ss) + + go grpcServer.Serve(lis) httpAddr := viper.GetString("server.http_addr") if httpAddr == "" { diff --git a/proto/signer.pb.go b/proto/signer.pb.go index c7ccd824ff..bf74faadab 100644 --- a/proto/signer.pb.go +++ b/proto/signer.pb.go @@ -16,22 +16,23 @@ It has these top-level messages: Signature SignatureRequest Void + HealthStatus */ package proto import proto1 "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - // Reference imports to suppress errors if they are not otherwise used. var _ = proto1.Marshal +var _ = fmt.Errorf +var _ = math.Inf // KeyInfo holds a KeyID that is used to reference the key and it's algorithm type KeyInfo struct { @@ -59,7 +60,7 @@ func (m *KeyInfo) GetAlgorithm() *Algorithm { // KeyID holds an ID that is used to reference the key type KeyID struct { - ID string `protobuf:"bytes,1,opt" json:"ID,omitempty"` + ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"` } func (m *KeyID) Reset() { *m = KeyID{} } @@ -142,6 +143,26 @@ func (m *Void) Reset() { *m = Void{} } func (m *Void) String() string { return proto1.CompactTextString(m) } func (*Void) ProtoMessage() {} +// A mapping of health check name to the check result message +type HealthStatus struct { + Status map[string]string `protobuf:"bytes,1,rep,name=status" json:"status,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *HealthStatus) Reset() { *m = HealthStatus{} } +func (m *HealthStatus) String() string { return proto1.CompactTextString(m) } +func (*HealthStatus) ProtoMessage() {} + +func (m *HealthStatus) GetStatus() map[string]string { + if m != nil { + return m.Status + } + return nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + // Client API for KeyManagement service type KeyManagementClient interface { @@ -151,6 +172,8 @@ type KeyManagementClient interface { DeleteKey(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*Void, error) // GetKeyInfo returns the PublicKey associated with a KeyID GetKeyInfo(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*PublicKey, error) + // CheckHealth returns the HealthStatus with the service + CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) } type keyManagementClient struct { @@ -188,6 +211,15 @@ func (c *keyManagementClient) GetKeyInfo(ctx context.Context, in *KeyID, opts .. return out, nil } +func (c *keyManagementClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) { + out := new(HealthStatus) + err := grpc.Invoke(ctx, "/proto.KeyManagement/CheckHealth", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for KeyManagement service type KeyManagementServer interface { @@ -197,15 +229,17 @@ type KeyManagementServer interface { DeleteKey(context.Context, *KeyID) (*Void, error) // GetKeyInfo returns the PublicKey associated with a KeyID GetKeyInfo(context.Context, *KeyID) (*PublicKey, error) + // CheckHealth returns the HealthStatus with the service + CheckHealth(context.Context, *Void) (*HealthStatus, error) } func RegisterKeyManagementServer(s *grpc.Server, srv KeyManagementServer) { s.RegisterService(&_KeyManagement_serviceDesc, srv) } -func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Algorithm) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).CreateKey(ctx, in) @@ -215,9 +249,9 @@ func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, code return out, nil } -func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(KeyID) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).DeleteKey(ctx, in) @@ -227,9 +261,9 @@ func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, code return out, nil } -func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(KeyID) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).GetKeyInfo(ctx, in) @@ -239,6 +273,18 @@ func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, cod return out, nil } +func _KeyManagement_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(Void) + if err := dec(in); err != nil { + return nil, err + } + out, err := srv.(KeyManagementServer).CheckHealth(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + var _KeyManagement_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.KeyManagement", HandlerType: (*KeyManagementServer)(nil), @@ -255,6 +301,10 @@ var _KeyManagement_serviceDesc = grpc.ServiceDesc{ MethodName: "GetKeyInfo", Handler: _KeyManagement_GetKeyInfo_Handler, }, + { + MethodName: "CheckHealth", + Handler: _KeyManagement_CheckHealth_Handler, + }, }, Streams: []grpc.StreamDesc{}, } @@ -264,6 +314,8 @@ var _KeyManagement_serviceDesc = grpc.ServiceDesc{ type SignerClient interface { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature Sign(ctx context.Context, in *SignatureRequest, opts ...grpc.CallOption) (*Signature, error) + // CheckHealth returns the HealthStatus with the service + CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) } type signerClient struct { @@ -283,20 +335,31 @@ func (c *signerClient) Sign(ctx 
context.Context, in *SignatureRequest, opts ...g return out, nil } +func (c *signerClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) { + out := new(HealthStatus) + err := grpc.Invoke(ctx, "/proto.Signer/CheckHealth", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Signer service type SignerServer interface { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature Sign(context.Context, *SignatureRequest) (*Signature, error) + // CheckHealth returns the HealthStatus with the service + CheckHealth(context.Context, *Void) (*HealthStatus, error) } func RegisterSignerServer(s *grpc.Server, srv SignerServer) { s.RegisterService(&_Signer_serviceDesc, srv) } -func _Signer_Sign_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Signer_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(SignatureRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(SignerServer).Sign(ctx, in) @@ -306,6 +369,18 @@ func _Signer_Sign_Handler(srv interface{}, ctx context.Context, codec grpc.Codec return out, nil } +func _Signer_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(Void) + if err := dec(in); err != nil { + return nil, err + } + out, err := srv.(SignerServer).CheckHealth(ctx, in) + if err != nil { + return nil, err + } + return out, nil +} + var _Signer_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.Signer", HandlerType: (*SignerServer)(nil), @@ -314,6 +389,10 @@ var _Signer_serviceDesc = grpc.ServiceDesc{ MethodName: "Sign", Handler: _Signer_Sign_Handler, }, + { + MethodName: "CheckHealth", + Handler: _Signer_CheckHealth_Handler, + }, }, Streams: []grpc.StreamDesc{}, } diff --git a/proto/signer.proto b/proto/signer.proto index 5278bc26cb..5421988b8d 100644 --- a/proto/signer.proto +++ b/proto/signer.proto @@ -13,12 +13,18 @@ service KeyManagement { // GetKeyInfo returns the PublicKey associated with a KeyID rpc GetKeyInfo(KeyID) returns (PublicKey) {} + + // CheckHealth returns the HealthStatus with the service + rpc CheckHealth(Void) returns (HealthStatus) {} } // Signer Interface service Signer { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature rpc Sign(SignatureRequest) returns (Signature) {} + + // CheckHealth returns the HealthStatus with the service + rpc CheckHealth(Void) returns (HealthStatus) {} } // KeyInfo holds a KeyID that is used to reference the key and it's algorithm @@ -59,3 +65,8 @@ message SignatureRequest { // Void represents an empty message type message Void { } + +// A mapping of health check name to the check result message +message HealthStatus { + map status = 1; +} diff --git a/signer/api/rpc_api.go b/signer/api/rpc_api.go index de1090c30e..c3f16e6e54 100644 --- a/signer/api/rpc_api.go +++ b/signer/api/rpc_api.go @@ -18,11 +18,13 @@ import ( //KeyManagementServer implements the KeyManagementServer grpc interface type KeyManagementServer struct { CryptoServices signer.CryptoServiceIndex + HealthChecker func() map[string]string } //SignerServer implements the SignerServer grpc interface type SignerServer struct { CryptoServices signer.CryptoServiceIndex + HealthChecker func() map[string]string } //CreateKey 
returns a PublicKey created using KeyManagementServer's SigningService @@ -106,6 +108,13 @@ func (s *KeyManagementServer) GetKeyInfo(ctx context.Context, keyID *pb.KeyID) ( }, nil } +//CheckHealth returns the HealthStatus with the service +func (s *KeyManagementServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) { + return &pb.HealthStatus{ + Status: s.HealthChecker(), + }, nil +} + //Sign signs a message and returns the signature using a private key associate with the KeyID from the SignatureRequest func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.Signature, error) { tufKey, service, err := FindKeyByID(s.CryptoServices, sr.KeyID) @@ -136,3 +145,10 @@ func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.S return signature, nil } + +//CheckHealth returns the HealthStatus with the service +func (s *SignerServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) { + return &pb.HealthStatus{ + Status: s.HealthChecker(), + }, nil +} diff --git a/signer/api/rpc_api_test.go b/signer/api/rpc_api_test.go index 4021c7dae9..9734af35ed 100644 --- a/signer/api/rpc_api_test.go +++ b/signer/api/rpc_api_test.go @@ -26,6 +26,10 @@ var ( grpcServer *grpc.Server void *pb.Void pr passphrase.Retriever + health = map[string]string{ + "db": "ok", + "other": "not ok", + } ) func init() { @@ -34,9 +38,16 @@ func init() { cryptoService := cryptoservice.NewCryptoService("", keyStore) cryptoServices := signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService} void = &pb.Void{} + + fakeHealth := func() map[string]string { + return health + } + //server setup - kms := &api.KeyManagementServer{CryptoServices: cryptoServices} - ss := &api.SignerServer{CryptoServices: cryptoServices} + kms := &api.KeyManagementServer{CryptoServices: cryptoServices, + HealthChecker: fakeHealth} + ss := &api.SignerServer{CryptoServices: cryptoServices, + HealthChecker: fakeHealth} grpcServer = grpc.NewServer() pb.RegisterKeyManagementServer(grpcServer, kms) pb.RegisterSignerServer(grpcServer, ss) @@ -48,10 +59,11 @@ func init() { go grpcServer.Serve(lis) //client setup - conn, err := grpc.Dial("127.0.0.1:7899") + conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure()) if err != nil { log.Fatalf("fail to dial: %v", err) } + kmClient = pb.NewKeyManagementClient(conn) sClient = pb.NewSignerClient(conn) } @@ -140,3 +152,13 @@ func TestSignReturnsNotFoundOnNonexistKeys(t *testing.T) { assert.Equal(t, grpc.Code(err), codes.NotFound) assert.Nil(t, ret) } + +func TestHealthChecksForServices(t *testing.T) { + sHealthStatus, err := sClient.CheckHealth(context.Background(), void) + assert.Nil(t, err) + assert.Equal(t, health, sHealthStatus.Status) + + kmHealthStatus, err := kmClient.CheckHealth(context.Background(), void) + assert.Nil(t, err) + assert.Equal(t, health, kmHealthStatus.Status) +} diff --git a/signer/keydbstore.go b/signer/keydbstore.go index 72b3e65f1a..a7fa5aa1ca 100644 --- a/signer/keydbstore.go +++ b/signer/keydbstore.go @@ -195,3 +195,17 @@ func (s *KeyDBStore) RotateKeyPassphrase(name, newPassphraseAlias string) error return nil } + +// HealthCheck verifies that DB exists and is query-able +func (s *KeyDBStore) HealthCheck() error { + dbPrivateKey := GormPrivateKey{} + tableOk := s.db.HasTable(&dbPrivateKey) + switch { + case s.db.Error != nil: + return s.db.Error + case !tableOk: + return fmt.Errorf( + "Cannot access table: %s", dbPrivateKey.TableName()) + } + return 
nil +} diff --git a/signer/keydbstore_test.go b/signer/keydbstore_test.go index 10e798a5c6..8e40a8be60 100644 --- a/signer/keydbstore_test.go +++ b/signer/keydbstore_test.go @@ -161,3 +161,35 @@ func TestKeyRotation(t *testing.T) { err = dbStore.RotateKeyPassphrase(testKey.ID(), "alias_3") assert.Error(t, err, "password alias no found") } + +func TestDBHealthCheck(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "notary-test-") + defer os.RemoveAll(tempBaseDir) + + // We are using SQLite for the tests + db, err := sql.Open("sqlite3", tempBaseDir+"test_db") + assert.NoError(t, err) + + // Create a new KeyDB store + dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db) + assert.NoError(t, err) + + // No key table, health check fails + err = dbStore.HealthCheck() + assert.Error(t, err, "Cannot access table:") + + // Ensure that the private_key table exists + dbStore.db.CreateTable(&GormPrivateKey{}) + + // Health check succeeds because the table exists + err = dbStore.HealthCheck() + assert.NoError(t, err) + + // Close the connection + err = dbStore.db.Close() + assert.NoError(t, err) + + // Health check fails because the connection is closed + err = dbStore.HealthCheck() + assert.Error(t, err, "Cannot access table:") +}
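
Usage note (not part of the patch): a minimal sketch of how a caller could exercise the CheckHealth RPCs added above, assuming the generated `proto` package and an insecure signer listening on 127.0.0.1:7899, as in the rpc_api_test.go setup; a production deployment would pass transport credentials to grpc.Dial instead.

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "github.com/docker/notary/proto"
)

func main() {
	// Dial without TLS, mirroring the test setup; swap grpc.WithInsecure()
	// for credentials in a real deployment.
	conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial signer: %v", err)
	}
	defer conn.Close()

	// Both services expose CheckHealth(Void) returning a HealthStatus whose
	// Status field maps each registered check name to its result message.
	kmStatus, err := pb.NewKeyManagementClient(conn).CheckHealth(context.Background(), &pb.Void{})
	if err != nil {
		log.Fatalf("KeyManagement health check failed: %v", err)
	}
	for check, result := range kmStatus.Status {
		log.Printf("KeyManagement %s: %s", check, result)
	}

	sStatus, err := pb.NewSignerClient(conn).CheckHealth(context.Background(), &pb.Void{})
	if err != nil {
		log.Fatalf("Signer health check failed: %v", err)
	}
	for check, result := range sStatus.Status {
		log.Printf("Signer %s: %s", check, result)
	}
}
```

Either call returns one entry per health check wired into the server's HealthChecker, which is what TestHealthChecksForServices asserts against the fake health map.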