Merge pull request #205 from cyli/signer-healthcheck

Adding a DB check to the signer health check
Ying Li 2015-10-08 16:53:57 -07:00
commit d092f78f05
206 changed files with 8277 additions and 27629 deletions
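The health-check implementation itself is not shown in this excerpt, so the following is only a rough sketch of the pattern the PR title describes: before reporting healthy, the signer verifies that its backing key database is actually reachable. All names here (checkDB, the MySQL driver, the DSN, the 5-second timeout) are illustrative assumptions, not code from this PR.

package main

import (
	"database/sql"
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql" // assumed driver; the signer's key store is SQL-backed
)

// checkDB pings the database and fails the health check if the ping errors
// out or does not return within the timeout. (Go 1.5 has no PingContext,
// hence the goroutine plus time.After pattern.)
func checkDB(db *sql.DB, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- db.Ping() }()
	select {
	case err := <-done:
		if err != nil {
			return fmt.Errorf("database unreachable: %v", err)
		}
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("database ping timed out after %v", timeout)
	}
}

func main() {
	// Hypothetical DSN, for illustration only.
	db, err := sql.Open("mysql", "user:pass@tcp(localhost:3306)/notarysigner")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	if err := checkDB(db, 5*time.Second); err != nil {
		fmt.Println("health check failed:", err)
		return
	}
	fmt.Println("signer database is healthy")
}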

Godeps/Godeps.json (generated): 24 changed lines
View File

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/docker/notary",
-	"GoVersion": "go1.4.2",
+	"GoVersion": "go1.5.1",
 	"Packages": [
 		"./..."
 	],
@@ -28,10 +28,6 @@
 		"ImportPath": "github.com/agl/ed25519",
 		"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
 	},
-	{
-		"ImportPath": "github.com/bradfitz/http2",
-		"Rev": "97124afb234048ae0c91b8883c59fcd890bf8145"
-	},
 	{
 		"ImportPath": "github.com/bugsnag/bugsnag-go",
 		"Comment": "v1.0.4-2-g13fd6b8",
@@ -119,7 +115,7 @@
 	},
 	{
 		"ImportPath": "github.com/golang/protobuf/proto",
-		"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
+		"Rev": "3d2510a4dd961caffa2ae781669c628d82db700a"
 	},
 	{
 		"ImportPath": "github.com/google/gofuzz",
@@ -216,23 +212,23 @@
 	},
 	{
 		"ImportPath": "golang.org/x/net/context",
-		"Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974"
+		"Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
 	},
 	{
-		"ImportPath": "golang.org/x/oauth2",
-		"Rev": "ce5ea7da934b76b1066c527632359e2b8f65db97"
+		"ImportPath": "golang.org/x/net/http2",
+		"Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
 	},
 	{
-		"ImportPath": "google.golang.org/cloud/compute/metadata",
-		"Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+		"ImportPath": "golang.org/x/net/internal/timeseries",
+		"Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
 	},
 	{
-		"ImportPath": "google.golang.org/cloud/internal",
-		"Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+		"ImportPath": "golang.org/x/net/trace",
+		"Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
 	},
 	{
 		"ImportPath": "google.golang.org/grpc",
-		"Rev": "97f42dd262e97f4632986eddbc74c19fa022ea08"
+		"Rev": "3e7b7e58f491074e9577050058fb95d2351a60b0"
 	},
 	{
 		"ImportPath": "gopkg.in/yaml.v2",

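The Godeps changes above replace the vendored github.com/bradfitz/http2 tree with golang.org/x/net/http2 (plus the x/net/trace and x/net/internal/timeseries packages that the newer grpc revision uses), all pinned at a single x/net revision, and drop the oauth2/cloud entries that are apparently no longer referenced. For calling code this is essentially an import-path move; a minimal illustrative snippet, not taken from this diff:

package main

import (
	"net/http"

	// Old vendored path, removed by this PR:
	//   "github.com/bradfitz/http2"
	// New upstream path pinned in Godeps.json:
	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":4443"}
	// ConfigureServer enables HTTP/2 on a standard net/http server; the API
	// carried over largely unchanged when the package moved under x/net.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		panic(err)
	}
	// srv.ListenAndServeTLS("cert.pem", "key.pem") would then serve it over TLS.
}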
View File

@@ -1 +0,0 @@
*~

View File

@@ -1,19 +0,0 @@
# This file is like Go's AUTHORS file: it lists Copyright holders.
# The list of humans who have contributed is in the CONTRIBUTORS file.
#
# To contribute to this project, because it will eventually be folded
# back into Go itself, you need to submit a CLA:
#
# http://golang.org/doc/contribute.html#copyright
#
# Then you get added to CONTRIBUTORS and you or your company get added
# to the AUTHORS file.
Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
Google, Inc.
Keith Rarick <kr@xph.us> github=kr
Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
Matt Layher <mdlayher@gmail.com> github=mdlayher
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

View File

@@ -1,19 +0,0 @@
# This file is like Go's CONTRIBUTORS file: it lists humans.
# The list of copyright holders (which may be companies) is in the AUTHORS file.
#
# To contribute to this project, because it will eventually be folded
# back into Go itself, you need to submit a CLA:
#
# http://golang.org/doc/contribute.html#copyright
#
# Then you get added to CONTRIBUTORS and you or your company get added
# to the AUTHORS file.
Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
Brad Fitzpatrick <bradfitz@golang.org> github=bradfitz
Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
Keith Rarick <kr@xph.us> github=kr
Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
Matt Layher <mdlayher@gmail.com> github=mdlayher
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

View File

@@ -1,5 +0,0 @@
We only accept contributions from users who have gone through Go's
contribution process (signed a CLA).
Please acknowledge whether you have (and use the same email) if
sending a pull request.

View File

@@ -1,7 +0,0 @@
Copyright 2014 Google & the Go AUTHORS
Go AUTHORS are:
See https://code.google.com/p/go/source/browse/AUTHORS
Licensed under the terms of Go itself:
https://code.google.com/p/go/source/browse/LICENSE

View File

@@ -1,73 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"io"
"reflect"
"testing"
)
var bufferReadTests = []struct {
buf buffer
read, wn int
werr error
wp []byte
wbuf buffer
}{
{
buffer{[]byte{'a', 0}, 0, 1, false, nil},
5, 1, nil, []byte{'a'},
buffer{[]byte{'a', 0}, 1, 1, false, nil},
},
{
buffer{[]byte{'a', 0}, 0, 1, true, io.EOF},
5, 1, io.EOF, []byte{'a'},
buffer{[]byte{'a', 0}, 1, 1, true, io.EOF},
},
{
buffer{[]byte{0, 'a'}, 1, 2, false, nil},
5, 1, nil, []byte{'a'},
buffer{[]byte{0, 'a'}, 2, 2, false, nil},
},
{
buffer{[]byte{0, 'a'}, 1, 2, true, io.EOF},
5, 1, io.EOF, []byte{'a'},
buffer{[]byte{0, 'a'}, 2, 2, true, io.EOF},
},
{
buffer{[]byte{}, 0, 0, false, nil},
5, 0, errReadEmpty, []byte{},
buffer{[]byte{}, 0, 0, false, nil},
},
{
buffer{[]byte{}, 0, 0, true, io.EOF},
5, 0, io.EOF, []byte{},
buffer{[]byte{}, 0, 0, true, io.EOF},
},
}
func TestBufferRead(t *testing.T) {
for i, tt := range bufferReadTests {
read := make([]byte, tt.read)
n, err := tt.buf.Read(read)
if n != tt.wn {
t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
continue
}
if err != tt.werr {
t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
continue
}
read = read[:n]
if !reflect.DeepEqual(read, tt.wp) {
t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
}
if !reflect.DeepEqual(tt.buf, tt.wbuf) {
t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
}
}
}

View File

@@ -1,27 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import "testing"
func TestErrCodeString(t *testing.T) {
tests := []struct {
err ErrCode
want string
}{
{ErrCodeProtocol, "PROTOCOL_ERROR"},
{0xd, "HTTP_1_1_REQUIRED"},
{0xf, "unknown error code 0xf"},
}
for i, tt := range tests {
got := tt.err.String()
if got != tt.want {
t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
}
}
}

View File

@@ -1,54 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import "testing"
func TestFlow(t *testing.T) {
var st flow
var conn flow
st.add(3)
conn.add(2)
if got, want := st.available(), int32(3); got != want {
t.Errorf("available = %d; want %d", got, want)
}
st.setConnFlow(&conn)
if got, want := st.available(), int32(2); got != want {
t.Errorf("after parent setup, available = %d; want %d", got, want)
}
st.take(2)
if got, want := conn.available(), int32(0); got != want {
t.Errorf("after taking 2, conn = %d; want %d", got, want)
}
if got, want := st.available(), int32(0); got != want {
t.Errorf("after taking 2, stream = %d; want %d", got, want)
}
}
func TestFlowAdd(t *testing.T) {
var f flow
if !f.add(1) {
t.Fatal("failed to add 1")
}
if !f.add(-1) {
t.Fatal("failed to add -1")
}
if got, want := f.available(), int32(0); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
if !f.add(1<<31 - 1) {
t.Fatal("failed to add 2^31-1")
}
if got, want := f.available(), int32(1<<31-1); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
if f.add(1) {
t.Fatal("adding 1 to max shouldn't be allowed")
}
}

View File

@@ -1,578 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"reflect"
"strings"
"testing"
"unsafe"
)
func testFramer() (*Framer, *bytes.Buffer) {
buf := new(bytes.Buffer)
return NewFramer(buf, buf), buf
}
func TestFrameSizes(t *testing.T) {
// Catch people rearranging the FrameHeader fields.
if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {
t.Errorf("FrameHeader size = %d; want %d", got, want)
}
}
func TestWriteRST(t *testing.T) {
fr, buf := testFramer()
var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4
fr.WriteRSTStream(streamID, ErrCode(errCode))
const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
want := &RSTStreamFrame{
FrameHeader: FrameHeader{
valid: true,
Type: 0x3,
Flags: 0x0,
Length: 0x4,
StreamID: 0x1020304,
},
ErrCode: 0x7060504,
}
if !reflect.DeepEqual(f, want) {
t.Errorf("parsed back %#v; want %#v", f, want)
}
}
func TestWriteData(t *testing.T) {
fr, buf := testFramer()
var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
data := []byte("ABC")
fr.WriteData(streamID, true, data)
const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
df, ok := f.(*DataFrame)
if !ok {
t.Fatalf("got %T; want *DataFrame", f)
}
if !bytes.Equal(df.Data(), data) {
t.Errorf("got %q; want %q", df.Data(), data)
}
if f.Header().Flags&1 == 0 {
t.Errorf("didn't see END_STREAM flag")
}
}
func TestWriteHeaders(t *testing.T) {
tests := []struct {
name string
p HeadersFrameParam
wantEnc string
wantFrame *HeadersFrame
}{
{
"basic",
HeadersFrameParam{
StreamID: 42,
BlockFragment: []byte("abc"),
Priority: PriorityParam{},
},
"\x00\x00\x03\x01\x00\x00\x00\x00*abc",
&HeadersFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: 42,
Type: FrameHeaders,
Length: uint32(len("abc")),
},
Priority: PriorityParam{},
headerFragBuf: []byte("abc"),
},
},
{
"basic + end flags",
HeadersFrameParam{
StreamID: 42,
BlockFragment: []byte("abc"),
EndStream: true,
EndHeaders: true,
Priority: PriorityParam{},
},
"\x00\x00\x03\x01\x05\x00\x00\x00*abc",
&HeadersFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: 42,
Type: FrameHeaders,
Flags: FlagHeadersEndStream | FlagHeadersEndHeaders,
Length: uint32(len("abc")),
},
Priority: PriorityParam{},
headerFragBuf: []byte("abc"),
},
},
{
"with padding",
HeadersFrameParam{
StreamID: 42,
BlockFragment: []byte("abc"),
EndStream: true,
EndHeaders: true,
PadLength: 5,
Priority: PriorityParam{},
},
"\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00",
&HeadersFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: 42,
Type: FrameHeaders,
Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded,
Length: uint32(1 + len("abc") + 5), // pad length + contents + padding
},
Priority: PriorityParam{},
headerFragBuf: []byte("abc"),
},
},
{
"with priority",
HeadersFrameParam{
StreamID: 42,
BlockFragment: []byte("abc"),
EndStream: true,
EndHeaders: true,
PadLength: 2,
Priority: PriorityParam{
StreamDep: 15,
Exclusive: true,
Weight: 127,
},
},
"\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00",
&HeadersFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: 42,
Type: FrameHeaders,
Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
},
Priority: PriorityParam{
StreamDep: 15,
Exclusive: true,
Weight: 127,
},
headerFragBuf: []byte("abc"),
},
},
}
for _, tt := range tests {
fr, buf := testFramer()
if err := fr.WriteHeaders(tt.p); err != nil {
t.Errorf("test %q: %v", tt.name, err)
continue
}
if buf.String() != tt.wantEnc {
t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
continue
}
if !reflect.DeepEqual(f, tt.wantFrame) {
t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
}
}
}
func TestWriteContinuation(t *testing.T) {
const streamID = 42
tests := []struct {
name string
end bool
frag []byte
wantFrame *ContinuationFrame
}{
{
"not end",
false,
[]byte("abc"),
&ContinuationFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: streamID,
Type: FrameContinuation,
Length: uint32(len("abc")),
},
headerFragBuf: []byte("abc"),
},
},
{
"end",
true,
[]byte("def"),
&ContinuationFrame{
FrameHeader: FrameHeader{
valid: true,
StreamID: streamID,
Type: FrameContinuation,
Flags: FlagContinuationEndHeaders,
Length: uint32(len("def")),
},
headerFragBuf: []byte("def"),
},
},
}
for _, tt := range tests {
fr, _ := testFramer()
if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {
t.Errorf("test %q: %v", tt.name, err)
continue
}
f, err := fr.ReadFrame()
if err != nil {
t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
continue
}
if !reflect.DeepEqual(f, tt.wantFrame) {
t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
}
}
}
func TestWritePriority(t *testing.T) {
const streamID = 42
tests := []struct {
name string
priority PriorityParam
wantFrame *PriorityFrame
}{
{
"not exclusive",
PriorityParam{
StreamDep: 2,
Exclusive: false,
Weight: 127,
},
&PriorityFrame{
FrameHeader{
valid: true,
StreamID: streamID,
Type: FramePriority,
Length: 5,
},
PriorityParam{
StreamDep: 2,
Exclusive: false,
Weight: 127,
},
},
},
{
"exclusive",
PriorityParam{
StreamDep: 3,
Exclusive: true,
Weight: 77,
},
&PriorityFrame{
FrameHeader{
valid: true,
StreamID: streamID,
Type: FramePriority,
Length: 5,
},
PriorityParam{
StreamDep: 3,
Exclusive: true,
Weight: 77,
},
},
},
}
for _, tt := range tests {
fr, _ := testFramer()
if err := fr.WritePriority(streamID, tt.priority); err != nil {
t.Errorf("test %q: %v", tt.name, err)
continue
}
f, err := fr.ReadFrame()
if err != nil {
t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
continue
}
if !reflect.DeepEqual(f, tt.wantFrame) {
t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
}
}
}
func TestWriteSettings(t *testing.T) {
fr, buf := testFramer()
settings := []Setting{{1, 2}, {3, 4}}
fr.WriteSettings(settings...)
const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
sf, ok := f.(*SettingsFrame)
if !ok {
t.Fatalf("Got a %T; want a SettingsFrame", f)
}
var got []Setting
sf.ForeachSetting(func(s Setting) error {
got = append(got, s)
valBack, ok := sf.Value(s.ID)
if !ok || valBack != s.Val {
t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val)
}
return nil
})
if !reflect.DeepEqual(settings, got) {
t.Errorf("Read settings %+v != written settings %+v", got, settings)
}
}
func TestWriteSettingsAck(t *testing.T) {
fr, buf := testFramer()
fr.WriteSettingsAck()
const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
}
func TestWriteWindowUpdate(t *testing.T) {
fr, buf := testFramer()
const streamID = 1<<24 + 2<<16 + 3<<8 + 4
const incr = 7<<24 + 6<<16 + 5<<8 + 4
if err := fr.WriteWindowUpdate(streamID, incr); err != nil {
t.Fatal(err)
}
const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
want := &WindowUpdateFrame{
FrameHeader: FrameHeader{
valid: true,
Type: 0x8,
Flags: 0x0,
Length: 0x4,
StreamID: 0x1020304,
},
Increment: 0x7060504,
}
if !reflect.DeepEqual(f, want) {
t.Errorf("parsed back %#v; want %#v", f, want)
}
}
func TestWritePing(t *testing.T) { testWritePing(t, false) }
func TestWritePingAck(t *testing.T) { testWritePing(t, true) }
func testWritePing(t *testing.T, ack bool) {
fr, buf := testFramer()
if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
t.Fatal(err)
}
var wantFlags Flags
if ack {
wantFlags = FlagPingAck
}
var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
want := &PingFrame{
FrameHeader: FrameHeader{
valid: true,
Type: 0x6,
Flags: wantFlags,
Length: 0x8,
StreamID: 0,
},
Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
}
if !reflect.DeepEqual(f, want) {
t.Errorf("parsed back %#v; want %#v", f, want)
}
}
func TestReadFrameHeader(t *testing.T) {
tests := []struct {
in string
want FrameHeader
}{
{in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}},
{in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{
Length: 66051, Type: 4, Flags: 5, StreamID: 101124105,
}},
// Ignore high bit:
{in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{
Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
{in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{
Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
}
for i, tt := range tests {
got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))
if err != nil {
t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err)
continue
}
tt.want.valid = true
if got != tt.want {
t.Errorf("%d. readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want)
}
}
}
func TestReadWriteFrameHeader(t *testing.T) {
tests := []struct {
len uint32
typ FrameType
flags Flags
streamID uint32
}{
{len: 0, typ: 255, flags: 1, streamID: 0},
{len: 0, typ: 255, flags: 1, streamID: 1},
{len: 0, typ: 255, flags: 1, streamID: 255},
{len: 0, typ: 255, flags: 1, streamID: 256},
{len: 0, typ: 255, flags: 1, streamID: 65535},
{len: 0, typ: 255, flags: 1, streamID: 65536},
{len: 0, typ: 1, flags: 255, streamID: 1},
{len: 255, typ: 1, flags: 255, streamID: 1},
{len: 256, typ: 1, flags: 255, streamID: 1},
{len: 65535, typ: 1, flags: 255, streamID: 1},
{len: 65536, typ: 1, flags: 255, streamID: 1},
{len: 16777215, typ: 1, flags: 255, streamID: 1},
}
for _, tt := range tests {
fr, buf := testFramer()
fr.startWrite(tt.typ, tt.flags, tt.streamID)
fr.writeBytes(make([]byte, tt.len))
fr.endWrite()
fh, err := ReadFrameHeader(buf)
if err != nil {
t.Errorf("ReadFrameHeader(%+v) = %v", tt, err)
continue
}
if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {
t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh)
}
}
}
func TestWriteTooLargeFrame(t *testing.T) {
fr, _ := testFramer()
fr.startWrite(0, 1, 1)
fr.writeBytes(make([]byte, 1<<24))
err := fr.endWrite()
if err != ErrFrameTooLarge {
t.Errorf("endWrite = %v; want ErrFrameTooLarge", err)
}
}
func TestWriteGoAway(t *testing.T) {
const debug = "foo"
fr, buf := testFramer()
if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {
t.Fatal(err)
}
const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
want := &GoAwayFrame{
FrameHeader: FrameHeader{
valid: true,
Type: 0x7,
Flags: 0,
Length: uint32(4 + 4 + len(debug)),
StreamID: 0,
},
LastStreamID: 0x01020304,
ErrCode: 0x05060708,
debugData: []byte(debug),
}
if !reflect.DeepEqual(f, want) {
t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
}
if got := string(f.(*GoAwayFrame).DebugData()); got != debug {
t.Errorf("debug data = %q; want %q", got, debug)
}
}
func TestWritePushPromise(t *testing.T) {
pp := PushPromiseParam{
StreamID: 42,
PromiseID: 42,
BlockFragment: []byte("abc"),
}
fr, buf := testFramer()
if err := fr.WritePushPromise(pp); err != nil {
t.Fatal(err)
}
const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc"
if buf.String() != wantEnc {
t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
}
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
_, ok := f.(*PushPromiseFrame)
if !ok {
t.Fatalf("got %T; want *PushPromiseFrame", f)
}
want := &PushPromiseFrame{
FrameHeader: FrameHeader{
valid: true,
Type: 0x5,
Flags: 0x0,
Length: 0x7,
StreamID: 42,
},
PromiseID: 42,
headerFragBuf: []byte("abc"),
}
if !reflect.DeepEqual(f, want) {
t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
}
}

View File

@@ -1,33 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"fmt"
"strings"
"testing"
)
func TestGoroutineLock(t *testing.T) {
DebugGoroutines = true
g := newGoroutineLock()
g.check()
sawPanic := make(chan interface{})
go func() {
defer func() { sawPanic <- recover() }()
g.check() // should panic
}()
e := <-sawPanic
if e == nil {
t.Fatal("did not see panic from check in other goroutine")
}
if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
t.Errorf("expected to see panic about running on the wrong goroutine; got %v", e)
}
}

View File

@@ -1,331 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package hpack
import (
"bytes"
"encoding/hex"
"reflect"
"strings"
"testing"
)
func TestEncoderTableSizeUpdate(t *testing.T) {
tests := []struct {
size1, size2 uint32
wantHex string
}{
// Should emit 2 table size updates (2048 and 4096)
{2048, 4096, "3fe10f 3fe11f 82"},
// Should emit 1 table size update (2048)
{16384, 2048, "3fe10f 82"},
}
for _, tt := range tests {
var buf bytes.Buffer
e := NewEncoder(&buf)
e.SetMaxDynamicTableSize(tt.size1)
e.SetMaxDynamicTableSize(tt.size2)
if err := e.WriteField(pair(":method", "GET")); err != nil {
t.Fatal(err)
}
want := removeSpace(tt.wantHex)
if got := hex.EncodeToString(buf.Bytes()); got != want {
t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
}
}
}
func TestEncoderWriteField(t *testing.T) {
var buf bytes.Buffer
e := NewEncoder(&buf)
var got []HeaderField
d := NewDecoder(4<<10, func(f HeaderField) {
got = append(got, f)
})
tests := []struct {
hdrs []HeaderField
}{
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
}},
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
}},
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
}},
}
for i, tt := range tests {
buf.Reset()
got = got[:0]
for _, hf := range tt.hdrs {
if err := e.WriteField(hf); err != nil {
t.Fatal(err)
}
}
_, err := d.Write(buf.Bytes())
if err != nil {
t.Errorf("%d. Decoder Write = %v", i, err)
}
if !reflect.DeepEqual(got, tt.hdrs) {
t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
}
}
}
func TestEncoderSearchTable(t *testing.T) {
e := NewEncoder(nil)
e.dynTab.add(pair("foo", "bar"))
e.dynTab.add(pair("blake", "miz"))
e.dynTab.add(pair(":method", "GET"))
tests := []struct {
hf HeaderField
wantI uint64
wantMatch bool
}{
// Name and Value match
{pair("foo", "bar"), uint64(len(staticTable) + 3), true},
{pair("blake", "miz"), uint64(len(staticTable) + 2), true},
{pair(":method", "GET"), 2, true},
// Only name match because Sensitive == true
{HeaderField{":method", "GET", true}, 2, false},
// Only Name matches
{pair("foo", "..."), uint64(len(staticTable) + 3), false},
{pair("blake", "..."), uint64(len(staticTable) + 2), false},
{pair(":method", "..."), 2, false},
// None match
{pair("foo-", "bar"), 0, false},
}
for _, tt := range tests {
if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
}
}
}
func TestAppendVarInt(t *testing.T) {
tests := []struct {
n byte
i uint64
want []byte
}{
// Fits in a byte:
{1, 0, []byte{0}},
{2, 2, []byte{2}},
{3, 6, []byte{6}},
{4, 14, []byte{14}},
{5, 30, []byte{30}},
{6, 62, []byte{62}},
{7, 126, []byte{126}},
{8, 254, []byte{254}},
// Multiple bytes:
{5, 1337, []byte{31, 154, 10}},
}
for _, tt := range tests {
got := appendVarInt(nil, tt.n, tt.i)
if !bytes.Equal(got, tt.want) {
t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
}
}
}
func TestAppendHpackString(t *testing.T) {
tests := []struct {
s, wantHex string
}{
// Huffman encoded
{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
// Not Huffman encoded
{"a", "01 61"},
// zero length
{"", "00"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendHpackString(nil, tt.s)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
}
}
}
func TestAppendIndexed(t *testing.T) {
tests := []struct {
i uint64
wantHex string
}{
// 1 byte
{1, "81"},
{126, "fe"},
// 2 bytes
{127, "ff00"},
{128, "ff01"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendIndexed(nil, tt.i)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
}
}
}
func TestAppendNewName(t *testing.T) {
tests := []struct {
f HeaderField
indexing bool
wantHex string
}{
// Incremental indexing
{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
// Without indexing
{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
// Never indexed
{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendNewName(nil, tt.f, tt.indexing)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
}
}
}
func TestAppendIndexedName(t *testing.T) {
tests := []struct {
f HeaderField
i uint64
indexing bool
wantHex string
}{
// Incremental indexing
{HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
// Without indexing
{HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
// Never indexed
{HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
{HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
}
}
}
func TestAppendTableSize(t *testing.T) {
tests := []struct {
i uint32
wantHex string
}{
// Fits into 1 byte
{30, "3e"},
// Extra byte
{31, "3f00"},
{32, "3f01"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendTableSize(nil, tt.i)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
}
}
}
func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
var buf bytes.Buffer
e := NewEncoder(&buf)
tests := []struct {
v uint32
wantUpdate bool
wantMinSize uint32
wantMaxSize uint32
}{
// Set new table size to 2048
{2048, true, 2048, 2048},
// Set new table size to 16384, but still limited to
// 4096
{16384, true, 2048, 4096},
}
for _, tt := range tests {
e.SetMaxDynamicTableSize(tt.v)
if got := e.tableSizeUpdate; tt.wantUpdate != got {
t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
}
if got := e.minSize; tt.wantMinSize != got {
t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
}
if got := e.dynTab.maxSize; tt.wantMaxSize != got {
t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
}
}
}
func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
e := NewEncoder(nil)
// 4095 < initialHeaderTableSize means maxSize is truncated to
// 4095.
e.SetMaxDynamicTableSizeLimit(4095)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
if got, want := e.maxSizeLimit, uint32(4095); got != want {
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
}
if got, want := e.tableSizeUpdate, true; got != want {
t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
}
// maxSize will be truncated to maxSizeLimit
e.SetMaxDynamicTableSize(16384)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
// 8192 > current maxSizeLimit, so maxSize does not change.
e.SetMaxDynamicTableSizeLimit(8192)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
if got, want := e.maxSizeLimit, uint32(8192); got != want {
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
}
}
func removeSpace(s string) string {
return strings.Replace(s, " ", "", -1)
}

View File

@@ -1,648 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package hpack
import (
"bufio"
"bytes"
"encoding/hex"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
)
func TestStaticTable(t *testing.T) {
fromSpec := `
+-------+-----------------------------+---------------+
| 1 | :authority | |
| 2 | :method | GET |
| 3 | :method | POST |
| 4 | :path | / |
| 5 | :path | /index.html |
| 6 | :scheme | http |
| 7 | :scheme | https |
| 8 | :status | 200 |
| 9 | :status | 204 |
| 10 | :status | 206 |
| 11 | :status | 304 |
| 12 | :status | 400 |
| 13 | :status | 404 |
| 14 | :status | 500 |
| 15 | accept-charset | |
| 16 | accept-encoding | gzip, deflate |
| 17 | accept-language | |
| 18 | accept-ranges | |
| 19 | accept | |
| 20 | access-control-allow-origin | |
| 21 | age | |
| 22 | allow | |
| 23 | authorization | |
| 24 | cache-control | |
| 25 | content-disposition | |
| 26 | content-encoding | |
| 27 | content-language | |
| 28 | content-length | |
| 29 | content-location | |
| 30 | content-range | |
| 31 | content-type | |
| 32 | cookie | |
| 33 | date | |
| 34 | etag | |
| 35 | expect | |
| 36 | expires | |
| 37 | from | |
| 38 | host | |
| 39 | if-match | |
| 40 | if-modified-since | |
| 41 | if-none-match | |
| 42 | if-range | |
| 43 | if-unmodified-since | |
| 44 | last-modified | |
| 45 | link | |
| 46 | location | |
| 47 | max-forwards | |
| 48 | proxy-authenticate | |
| 49 | proxy-authorization | |
| 50 | range | |
| 51 | referer | |
| 52 | refresh | |
| 53 | retry-after | |
| 54 | server | |
| 55 | set-cookie | |
| 56 | strict-transport-security | |
| 57 | transfer-encoding | |
| 58 | user-agent | |
| 59 | vary | |
| 60 | via | |
| 61 | www-authenticate | |
+-------+-----------------------------+---------------+
`
bs := bufio.NewScanner(strings.NewReader(fromSpec))
re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
for bs.Scan() {
l := bs.Text()
if !strings.Contains(l, "|") {
continue
}
m := re.FindStringSubmatch(l)
if m == nil {
continue
}
i, err := strconv.Atoi(m[1])
if err != nil {
t.Errorf("Bogus integer on line %q", l)
continue
}
if i < 1 || i > len(staticTable) {
t.Errorf("Bogus index %d on line %q", i, l)
continue
}
if got, want := staticTable[i-1].Name, m[2]; got != want {
t.Errorf("header index %d name = %q; want %q", i, got, want)
}
if got, want := staticTable[i-1].Value, m[3]; got != want {
t.Errorf("header index %d value = %q; want %q", i, got, want)
}
}
if err := bs.Err(); err != nil {
t.Error(err)
}
}
func (d *Decoder) mustAt(idx int) HeaderField {
if hf, ok := d.at(uint64(idx)); !ok {
panic(fmt.Sprintf("bogus index %d", idx))
} else {
return hf
}
}
func TestDynamicTableAt(t *testing.T) {
d := NewDecoder(4096, nil)
at := d.mustAt
if got, want := at(2), (pair(":method", "GET")); got != want {
t.Errorf("at(2) = %v; want %v", got, want)
}
d.dynTab.add(pair("foo", "bar"))
d.dynTab.add(pair("blake", "miz"))
if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 2) = %v; want %v", got, want)
}
if got, want := at(3), (pair(":method", "POST")); got != want {
t.Errorf("at(3) = %v; want %v", got, want)
}
}
func TestDynamicTableSearch(t *testing.T) {
dt := dynamicTable{}
dt.setMaxSize(4096)
dt.add(pair("foo", "bar"))
dt.add(pair("blake", "miz"))
dt.add(pair(":method", "GET"))
tests := []struct {
hf HeaderField
wantI uint64
wantMatch bool
}{
// Name and Value match
{pair("foo", "bar"), 3, true},
{pair(":method", "GET"), 1, true},
// Only name match because of Sensitive == true
{HeaderField{"blake", "miz", true}, 2, false},
// Only Name matches
{pair("foo", "..."), 3, false},
{pair("blake", "..."), 2, false},
{pair(":method", "..."), 1, false},
// None match
{pair("foo-", "bar"), 0, false},
}
for _, tt := range tests {
if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
}
}
}
func TestDynamicTableSizeEvict(t *testing.T) {
d := NewDecoder(4096, nil)
if want := uint32(0); d.dynTab.size != want {
t.Fatalf("size = %d; want %d", d.dynTab.size, want)
}
add := d.dynTab.add
add(pair("blake", "eats pizza"))
if want := uint32(15 + 32); d.dynTab.size != want {
t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
}
add(pair("foo", "bar"))
if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
}
d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
if want := uint32(6 + 32); d.dynTab.size != want {
t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
}
if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
add(pair("long", strings.Repeat("x", 500)))
if want := uint32(0); d.dynTab.size != want {
t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
}
}
func TestDecoderDecode(t *testing.T) {
tests := []struct {
name string
in []byte
want []HeaderField
wantDynTab []HeaderField // newest entry first
}{
// C.2.1 Literal Header Field with Indexing
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
{"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
[]HeaderField{pair("custom-key", "custom-header")},
[]HeaderField{pair("custom-key", "custom-header")},
},
// C.2.2 Literal Header Field without Indexing
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
{"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
[]HeaderField{pair(":path", "/sample/path")},
[]HeaderField{}},
// C.2.3 Literal Header Field never Indexed
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
{"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
[]HeaderField{{"password", "secret", true}},
[]HeaderField{}},
// C.2.4 Indexed Header Field
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
{"C.2.4", []byte("\x82"),
[]HeaderField{pair(":method", "GET")},
[]HeaderField{}},
}
for _, tt := range tests {
d := NewDecoder(4096, nil)
hf, err := d.DecodeFull(tt.in)
if err != nil {
t.Errorf("%s: %v", tt.name, err)
continue
}
if !reflect.DeepEqual(hf, tt.want) {
t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
}
gotDynTab := d.dynTab.reverseCopy()
if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
}
}
}
func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
hf = make([]HeaderField, len(dt.ents))
for i := range hf {
hf[i] = dt.ents[len(dt.ents)-1-i]
}
return
}
type encAndWant struct {
enc []byte
want []HeaderField
wantDynTab []HeaderField
wantDynSize uint32
}
// C.3 Request Examples without Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
func TestDecodeC3_NoHuffman(t *testing.T) {
testDecodeSeries(t, 4096, []encAndWant{
{dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
},
[]HeaderField{
pair(":authority", "www.example.com"),
},
57,
},
{dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
},
[]HeaderField{
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
110,
},
{dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
},
[]HeaderField{
pair("custom-key", "custom-value"),
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
164,
},
})
}
// C.4 Request Examples with Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
func TestDecodeC4_Huffman(t *testing.T) {
testDecodeSeries(t, 4096, []encAndWant{
{dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
},
[]HeaderField{
pair(":authority", "www.example.com"),
},
57,
},
{dehex("8286 84be 5886 a8eb 1064 9cbf"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
},
[]HeaderField{
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
110,
},
{dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
},
[]HeaderField{
pair("custom-key", "custom-value"),
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
164,
},
})
}
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
// "This section shows several consecutive header lists, corresponding
// to HTTP responses, on the same connection. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur."
func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
testDecodeSeries(t, 256, []encAndWant{
{dehex(`
4803 3330 3258 0770 7269 7661 7465 611d
4d6f 6e2c 2032 3120 4f63 7420 3230 3133
2032 303a 3133 3a32 3120 474d 546e 1768
7474 7073 3a2f 2f77 7777 2e65 7861 6d70
6c65 2e63 6f6d
`),
[]HeaderField{
pair(":status", "302"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
pair(":status", "302"),
},
222,
},
{dehex("4803 3330 37c1 c0bf"),
[]HeaderField{
pair(":status", "307"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair(":status", "307"),
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
},
222,
},
{dehex(`
88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
3230 3133 2032 303a 3133 3a32 3220 474d
54c0 5a04 677a 6970 7738 666f 6f3d 4153
444a 4b48 514b 425a 584f 5157 454f 5049
5541 5851 5745 4f49 553b 206d 6178 2d61
6765 3d33 3630 303b 2076 6572 7369 6f6e
3d31
`),
[]HeaderField{
pair(":status", "200"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
pair("location", "https://www.example.com"),
pair("content-encoding", "gzip"),
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
},
[]HeaderField{
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
pair("content-encoding", "gzip"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
},
215,
},
})
}
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
// "This section shows the same examples as the previous section, but
// using Huffman encoding for the literal values. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur. The eviction mechanism
// uses the length of the decoded literal values, so the same
// evictions occurs as in the previous section."
func TestDecodeC6_ResponsesHuffman(t *testing.T) {
testDecodeSeries(t, 256, []encAndWant{
{dehex(`
4882 6402 5885 aec3 771a 4b61 96d0 7abe
9410 54d4 44a8 2005 9504 0b81 66e0 82a6
2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
e9ae 82ae 43d3
`),
[]HeaderField{
pair(":status", "302"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
pair(":status", "302"),
},
222,
},
{dehex("4883 640e ffc1 c0bf"),
[]HeaderField{
pair(":status", "307"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair(":status", "307"),
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
},
222,
},
{dehex(`
88c1 6196 d07a be94 1054 d444 a820 0595
040b 8166 e084 a62d 1bff c05a 839b d9ab
77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
9587 3160 65c0 03ed 4ee5 b106 3d50 07
`),
[]HeaderField{
pair(":status", "200"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
pair("location", "https://www.example.com"),
pair("content-encoding", "gzip"),
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
},
[]HeaderField{
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
pair("content-encoding", "gzip"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
},
215,
},
})
}
func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
d := NewDecoder(size, nil)
for i, step := range steps {
hf, err := d.DecodeFull(step.enc)
if err != nil {
t.Fatalf("Error at step index %d: %v", i, err)
}
if !reflect.DeepEqual(hf, step.want) {
t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
}
gotDynTab := d.dynTab.reverseCopy()
if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
}
if d.dynTab.size != step.wantDynSize {
t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
}
}
}
func TestHuffmanDecode(t *testing.T) {
tests := []struct {
inHex, want string
}{
{"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
{"a8eb 1064 9cbf", "no-cache"},
{"25a8 49e9 5ba9 7d7f", "custom-key"},
{"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
{"6402", "302"},
{"aec3 771a 4b", "private"},
{"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
{"9bd9 ab", "gzip"},
{"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
}
for i, tt := range tests {
var buf bytes.Buffer
in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
if err != nil {
t.Errorf("%d. hex input error: %v", i, err)
continue
}
if _, err := HuffmanDecode(&buf, in); err != nil {
t.Errorf("%d. decode error: %v", i, err)
continue
}
if got := buf.String(); tt.want != got {
t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
}
}
}
func TestAppendHuffmanString(t *testing.T) {
tests := []struct {
in, want string
}{
{"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
{"no-cache", "a8eb 1064 9cbf"},
{"custom-key", "25a8 49e9 5ba9 7d7f"},
{"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
{"302", "6402"},
{"private", "aec3 771a 4b"},
{"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
{"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
{"gzip", "9bd9 ab"},
{"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
}
for i, tt := range tests {
buf := []byte{}
want := strings.Replace(tt.want, " ", "", -1)
buf = AppendHuffmanString(buf, tt.in)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("%d. encode = %q; want %q", i, got, want)
}
}
}
func TestReadVarInt(t *testing.T) {
type res struct {
i uint64
consumed int
err error
}
tests := []struct {
n byte
p []byte
want res
}{
// Fits in a byte:
{1, []byte{0}, res{0, 1, nil}},
{2, []byte{2}, res{2, 1, nil}},
{3, []byte{6}, res{6, 1, nil}},
{4, []byte{14}, res{14, 1, nil}},
{5, []byte{30}, res{30, 1, nil}},
{6, []byte{62}, res{62, 1, nil}},
{7, []byte{126}, res{126, 1, nil}},
{8, []byte{254}, res{254, 1, nil}},
// Doesn't fit in a byte:
{1, []byte{1}, res{0, 0, errNeedMore}},
{2, []byte{3}, res{0, 0, errNeedMore}},
{3, []byte{7}, res{0, 0, errNeedMore}},
{4, []byte{15}, res{0, 0, errNeedMore}},
{5, []byte{31}, res{0, 0, errNeedMore}},
{6, []byte{63}, res{0, 0, errNeedMore}},
{7, []byte{127}, res{0, 0, errNeedMore}},
{8, []byte{255}, res{0, 0, errNeedMore}},
// Ignoring top bits:
{5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
{5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
{5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
// Extra byte:
{5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
// Short a byte:
{5, []byte{191, 154}, res{0, 0, errNeedMore}},
// integer overflow:
{1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
}
for _, tt := range tests {
i, remain, err := readVarInt(tt.n, tt.p)
consumed := len(tt.p) - len(remain)
got := res{i, consumed, err}
if got != tt.want {
t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
}
}
}
func dehex(s string) []byte {
s = strings.Replace(s, " ", "", -1)
s = strings.Replace(s, "\n", "", -1)
b, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return b
}

View File

@@ -1,152 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"bytes"
"errors"
"flag"
"fmt"
"net/http"
"os/exec"
"strconv"
"strings"
"testing"
"github.com/bradfitz/http2/hpack"
)
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
func condSkipFailingTest(t *testing.T) {
if !*knownFailing {
t.Skip("Skipping known-failing test without --known_failing")
}
}
func init() {
DebugGoroutines = true
flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
}
func TestSettingString(t *testing.T) {
tests := []struct {
s Setting
want string
}{
{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
}
for i, tt := range tests {
got := fmt.Sprint(tt.s)
if got != tt.want {
t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
}
}
}
type twriter struct {
t testing.TB
st *serverTester // optional
}
func (w twriter) Write(p []byte) (n int, err error) {
if w.st != nil {
ps := string(p)
for _, phrase := range w.st.logFilter {
if strings.Contains(ps, phrase) {
return len(p), nil // no logging
}
}
}
w.t.Logf("%s", p)
return len(p), nil
}
// like encodeHeader, but doesn't add implicit pseudo headers.
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
for len(headers) > 0 {
k, v := headers[0], headers[1]
headers = headers[2:]
if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
}
}
return buf.Bytes()
}
// Verify that curl has http2.
func requireCurl(t *testing.T) {
out, err := dockerLogs(curl(t, "--version"))
if err != nil {
t.Skipf("failed to determine curl features; skipping test")
}
if !strings.Contains(string(out), "HTTP2") {
t.Skip("curl doesn't support HTTP2; skipping test")
}
}
func curl(t *testing.T, args ...string) (container string) {
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).CombinedOutput()
if err != nil {
t.Skipf("Failed to run curl in docker: %v, %s", err, out)
}
return strings.TrimSpace(string(out))
}
type puppetCommand struct {
fn func(w http.ResponseWriter, r *http.Request)
done chan<- bool
}
type handlerPuppet struct {
ch chan puppetCommand
}
func newHandlerPuppet() *handlerPuppet {
return &handlerPuppet{
ch: make(chan puppetCommand),
}
}
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
for cmd := range p.ch {
cmd.fn(w, r)
cmd.done <- true
}
}
func (p *handlerPuppet) done() { close(p.ch) }
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
done := make(chan bool)
p.ch <- puppetCommand{fn, done}
<-done
}
func dockerLogs(container string) ([]byte, error) {
out, err := exec.Command("docker", "wait", container).CombinedOutput()
if err != nil {
return out, err
}
exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
if err != nil {
return out, errors.New("unexpected exit status from docker wait")
}
out, err = exec.Command("docker", "logs", container).CombinedOutput()
exec.Command("docker", "rm", container).Run()
if err == nil && exitStatus != 0 {
err = fmt.Errorf("exit status %d: %s", exitStatus, out)
}
return out, err
}
func kill(container string) {
exec.Command("docker", "kill", container).Run()
exec.Command("docker", "rm", container).Run()
}

View File

@@ -1,24 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"errors"
"testing"
)
func TestPipeClose(t *testing.T) {
var p pipe
p.c.L = &p.m
a := errors.New("a")
b := errors.New("b")
p.Close(a)
p.Close(b)
_, err := p.Read(make([]byte, 1))
if err != a {
t.Errorf("err = %v want %v", err, a)
}
}

View File

@@ -1,121 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"testing"
)
func TestPriority(t *testing.T) {
// A -> B
// move A's parent to B
streams := make(map[uint32]*stream)
a := &stream{
parent: nil,
weight: 16,
}
streams[1] = a
b := &stream{
parent: a,
weight: 16,
}
streams[2] = b
adjustStreamPriority(streams, 1, PriorityParam{
Weight: 20,
StreamDep: 2,
})
if a.parent != b {
t.Errorf("Expected A's parent to be B")
}
if a.weight != 20 {
t.Errorf("Expected A's weight to be 20; got %d", a.weight)
}
if b.parent != nil {
t.Errorf("Expected B to have no parent")
}
if b.weight != 16 {
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
}
}
func TestPriorityExclusiveZero(t *testing.T) {
// A B and C are all children of the 0 stream.
// Exclusive reprioritization to any of the streams
// should bring the rest of the streams under the
// reprioritized stream
streams := make(map[uint32]*stream)
a := &stream{
parent: nil,
weight: 16,
}
streams[1] = a
b := &stream{
parent: nil,
weight: 16,
}
streams[2] = b
c := &stream{
parent: nil,
weight: 16,
}
streams[3] = c
adjustStreamPriority(streams, 3, PriorityParam{
Weight: 20,
StreamDep: 0,
Exclusive: true,
})
if a.parent != c {
t.Errorf("Expected A's parent to be C")
}
if a.weight != 16 {
t.Errorf("Expected A's weight to be 16; got %d", a.weight)
}
if b.parent != c {
t.Errorf("Expected B's parent to be C")
}
if b.weight != 16 {
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
}
if c.parent != nil {
t.Errorf("Expected C to have no parent")
}
if c.weight != 20 {
t.Errorf("Expected C's weight to be 20; got %d", c.weight)
}
}
func TestPriorityOwnParent(t *testing.T) {
streams := make(map[uint32]*stream)
a := &stream{
parent: nil,
weight: 16,
}
streams[1] = a
b := &stream{
parent: a,
weight: 16,
}
streams[2] = b
adjustStreamPriority(streams, 1, PriorityParam{
Weight: 20,
StreamDep: 1,
})
if a.parent != nil {
t.Errorf("Expected A's parent to be nil")
}
if a.weight != 20 {
t.Errorf("Expected A's weight to be 20; got %d", a.weight)
}
if b.parent != a {
t.Errorf("Expected B's parent to be A")
}
if b.weight != 16 {
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -1,168 +0,0 @@
// Copyright 2015 The Go Authors.
// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://go.googlesource.com/go/+/master/LICENSE
package http2
import (
"flag"
"io"
"io/ioutil"
"net/http"
"os"
"reflect"
"strings"
"testing"
"time"
)
var (
extNet = flag.Bool("extnet", false, "do external network tests")
transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
insecure = flag.Bool("insecure", false, "insecure TLS dials")
)
func TestTransportExternal(t *testing.T) {
if !*extNet {
t.Skip("skipping external network test")
}
req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
rt := &Transport{
InsecureTLSDial: *insecure,
}
res, err := rt.RoundTrip(req)
if err != nil {
t.Fatalf("%v", err)
}
res.Write(os.Stdout)
}
func TestTransport(t *testing.T) {
const body = "sup"
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, body)
})
defer st.Close()
tr := &Transport{InsecureTLSDial: true}
defer tr.CloseIdleConnections()
req, err := http.NewRequest("GET", st.ts.URL, nil)
if err != nil {
t.Fatal(err)
}
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
t.Logf("Got res: %+v", res)
if g, w := res.StatusCode, 200; g != w {
t.Errorf("StatusCode = %v; want %v", g, w)
}
if g, w := res.Status, "200 OK"; g != w {
t.Errorf("Status = %q; want %q", g, w)
}
wantHeader := http.Header{
"Content-Length": []string{"3"},
"Content-Type": []string{"text/plain; charset=utf-8"},
}
if !reflect.DeepEqual(res.Header, wantHeader) {
t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
}
if res.Request != req {
t.Errorf("Response.Request = %p; want %p", res.Request, req)
}
if res.TLS == nil {
t.Errorf("Response.TLS = nil; want non-nil")
}
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("Body read: %v", err)
} else if string(slurp) != body {
t.Errorf("Body = %q; want %q", slurp, body)
}
}
func TestTransportReusesConns(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, r.RemoteAddr)
}, optOnlyServer)
defer st.Close()
tr := &Transport{InsecureTLSDial: true}
defer tr.CloseIdleConnections()
get := func() string {
req, err := http.NewRequest("GET", st.ts.URL, nil)
if err != nil {
t.Fatal(err)
}
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("Body read: %v", err)
}
addr := strings.TrimSpace(string(slurp))
if addr == "" {
t.Fatalf("didn't get an addr in response")
}
return addr
}
first := get()
second := get()
if first != second {
t.Errorf("first and second responses were on different connections: %q vs %q", first, second)
}
}
func TestTransportAbortClosesPipes(t *testing.T) {
shutdown := make(chan struct{})
st := newServerTester(t,
func(w http.ResponseWriter, r *http.Request) {
w.(http.Flusher).Flush()
<-shutdown
},
optOnlyServer,
)
defer st.Close()
defer close(shutdown) // we must shut down before st.Close() to avoid hanging
done := make(chan struct{})
requestMade := make(chan struct{})
go func() {
defer close(done)
tr := &Transport{
InsecureTLSDial: true,
}
req, err := http.NewRequest("GET", st.ts.URL, nil)
if err != nil {
t.Fatal(err)
}
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
close(requestMade)
_, err = ioutil.ReadAll(res.Body)
if err == nil {
t.Error("expected error from res.Body.Read")
}
}()
<-requestMade
// Now force the serve loop to end, via closing the connection.
st.closeConn()
// deadlock? that's a bug.
select {
case <-done:
case <-time.After(3 * time.Second):
t.Fatal("timeout")
}
}
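// Hedged usage sketch, not part of the diff: how a caller might plug the
// vendored golang.org/x/net/http2 Transport into a standard http.Client.
// InsecureTLSDial, RoundTrip and CloseIdleConnections are the same fields and
// methods exercised by the tests above; the target URL is illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{InsecureTLSDial: true} // skips TLS verification, as in the tests
	defer tr.CloseIdleConnections()

	client := &http.Client{Transport: tr}
	res, err := client.Get("https://http2.golang.org/") // the host TestTransportExternal dials
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d bytes\n", res.Status, len(body))
}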

View File

@ -1,357 +0,0 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
package http2
import (
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"testing"
)
var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
// The global map of sentence coverage for the http2 spec.
var defaultSpecCoverage specCoverage
var loadSpecOnce sync.Once
func loadSpec() {
if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
panic(err)
} else {
defaultSpecCoverage = readSpecCov(f)
f.Close()
}
}
// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
// "covered" will be included in report outputed by TestSpecCoverage.
func covers(sec, sentences string) {
loadSpecOnce.Do(loadSpec)
defaultSpecCoverage.cover(sec, sentences)
}
type specPart struct {
section string
sentence string
}
func (ss specPart) Less(oo specPart) bool {
atoi := func(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
panic(err)
}
return n
}
a := strings.Split(ss.section, ".")
b := strings.Split(oo.section, ".")
for len(a) > 0 {
if len(b) == 0 {
return false
}
x, y := atoi(a[0]), atoi(b[0])
if x == y {
a, b = a[1:], b[1:]
continue
}
return x < y
}
if len(b) > 0 {
return true
}
return false
}
type bySpecSection []specPart
func (a bySpecSection) Len() int { return len(a) }
func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type specCoverage struct {
coverage map[specPart]bool
d *xml.Decoder
}
func joinSection(sec []int) string {
s := fmt.Sprintf("%d", sec[0])
for _, n := range sec[1:] {
s = fmt.Sprintf("%s.%d", s, n)
}
return s
}
func (sc specCoverage) readSection(sec []int) {
var (
buf = new(bytes.Buffer)
sub = 0
)
for {
tk, err := sc.d.Token()
if err != nil {
if err == io.EOF {
return
}
panic(err)
}
switch v := tk.(type) {
case xml.StartElement:
if skipElement(v) {
if err := sc.d.Skip(); err != nil {
panic(err)
}
if v.Name.Local == "section" {
sub++
}
break
}
switch v.Name.Local {
case "section":
sub++
sc.readSection(append(sec, sub))
case "xref":
buf.Write(sc.readXRef(v))
}
case xml.CharData:
if len(sec) == 0 {
break
}
buf.Write(v)
case xml.EndElement:
if v.Name.Local == "section" {
sc.addSentences(joinSection(sec), buf.String())
return
}
}
}
}
func (sc specCoverage) readXRef(se xml.StartElement) []byte {
var b []byte
for {
tk, err := sc.d.Token()
if err != nil {
panic(err)
}
switch v := tk.(type) {
case xml.CharData:
if b != nil {
panic("unexpected CharData")
}
b = []byte(string(v))
case xml.EndElement:
if v.Name.Local != "xref" {
panic("expected </xref>")
}
if b != nil {
return b
}
sig := attrSig(se)
switch sig {
case "target":
return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
case "fmt-of,rel,target", "fmt-,,rel,target":
return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
case "fmt-of,sec,target", "fmt-,,sec,target":
return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
case "fmt-of,rel,sec,target":
return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
default:
panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
}
default:
panic(fmt.Sprintf("unexpected tag %q", v))
}
}
}
var skipAnchor = map[string]bool{
"intro": true,
"Overview": true,
}
var skipTitle = map[string]bool{
"Acknowledgements": true,
"Change Log": true,
"Document Organization": true,
"Conventions and Terminology": true,
}
func skipElement(s xml.StartElement) bool {
switch s.Name.Local {
case "artwork":
return true
case "section":
for _, attr := range s.Attr {
switch attr.Name.Local {
case "anchor":
if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
return true
}
case "title":
if skipTitle[attr.Value] {
return true
}
}
}
}
return false
}
func readSpecCov(r io.Reader) specCoverage {
sc := specCoverage{
coverage: map[specPart]bool{},
d: xml.NewDecoder(r)}
sc.readSection(nil)
return sc
}
func (sc specCoverage) addSentences(sec string, sentence string) {
for _, s := range parseSentences(sentence) {
sc.coverage[specPart{sec, s}] = false
}
}
func (sc specCoverage) cover(sec string, sentence string) {
for _, s := range parseSentences(sentence) {
p := specPart{sec, s}
if _, ok := sc.coverage[p]; !ok {
panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
}
sc.coverage[specPart{sec, s}] = true
}
}
var whitespaceRx = regexp.MustCompile(`\s+`)
func parseSentences(sens string) []string {
sens = strings.TrimSpace(sens)
if sens == "" {
return nil
}
ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
for i, s := range ss {
s = strings.TrimSpace(s)
if !strings.HasSuffix(s, ".") {
s += "."
}
ss[i] = s
}
return ss
}
func TestSpecParseSentences(t *testing.T) {
tests := []struct {
ss string
want []string
}{
{"Sentence 1. Sentence 2.",
[]string{
"Sentence 1.",
"Sentence 2.",
}},
{"Sentence 1. \nSentence 2.\tSentence 3.",
[]string{
"Sentence 1.",
"Sentence 2.",
"Sentence 3.",
}},
}
for i, tt := range tests {
got := parseSentences(tt.ss)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%d: got = %q, want %q", i, got, tt.want)
}
}
}
func TestSpecCoverage(t *testing.T) {
if !*coverSpec {
t.Skip()
}
loadSpecOnce.Do(loadSpec)
var (
list []specPart
cv = defaultSpecCoverage.coverage
total = len(cv)
complete = 0
)
for sp, touched := range defaultSpecCoverage.coverage {
if touched {
complete++
} else {
list = append(list, sp)
}
}
sort.Stable(bySpecSection(list))
if testing.Short() && len(list) > 5 {
list = list[:5]
}
for _, p := range list {
t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
}
t.Logf("%d/%d (%d%%) sentances covered", complete, total, (complete/total)*100)
}
func attrSig(se xml.StartElement) string {
var names []string
for _, attr := range se.Attr {
if attr.Name.Local == "fmt" {
names = append(names, "fmt-"+attr.Value)
} else {
names = append(names, attr.Name.Local)
}
}
sort.Strings(names)
return strings.Join(names, ",")
}
func attrValue(se xml.StartElement, attr string) string {
for _, a := range se.Attr {
if a.Name.Local == attr {
return a.Value
}
}
panic("unknown attribute " + attr)
}
func TestSpecPartLess(t *testing.T) {
tests := []struct {
sec1, sec2 string
want bool
}{
{"6.2.1", "6.2", false},
{"6.2", "6.2.1", true},
{"6.10", "6.10.1", true},
{"6.10", "6.1.1", false}, // 10, not 1
{"6.1", "6.1", false}, // equal, so not less
}
for _, tt := range tests {
got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
if got != tt.want {
t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
}
}
}

File diff suppressed because it is too large

View File

@ -75,12 +75,13 @@ func Merge(dst, src Message) {
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i))
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) {
}
}
func mergeAny(out, in reflect.Value) {
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
@ -112,7 +116,21 @@ func mergeAny(out, in reflect.Value) {
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
@ -127,7 +145,7 @@ func mergeAny(out, in reflect.Value) {
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key))
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@ -143,13 +161,21 @@ func mergeAny(out, in reflect.Value) {
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem())
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
@ -167,7 +193,7 @@ func mergeAny(out, in reflect.Value) {
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
@ -184,7 +210,7 @@ func mergeExtension(out, in map[int32]Extension) {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value))
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {

View File

@ -1,227 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/testdata"
)
var cloneTestMessage = &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
Value: []byte("some bytes"),
},
},
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
}
func init() {
ext := &pb.Ext{
Data: proto.String("extension"),
}
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
panic("SetExtension: " + err.Error())
}
}
func TestClone(t *testing.T) {
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
if !proto.Equal(m, cloneTestMessage) {
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
}
// Verify it was a deep copy.
*m.Inner.Port++
if proto.Equal(m, cloneTestMessage) {
t.Error("Mutating clone changed the original")
}
// Byte fields and repeated fields should be copied.
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
t.Error("Pet: repeated field not copied")
}
if &m.Others[0] == &cloneTestMessage.Others[0] {
t.Error("Others: repeated field not copied")
}
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
t.Error("Others[0].Value: bytes field not copied")
}
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
t.Error("RepBytes: repeated field not copied")
}
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
t.Error("RepBytes[0]: bytes field not copied")
}
}
func TestCloneNil(t *testing.T) {
var m *pb.MyMessage
if c := proto.Clone(m); !proto.Equal(m, c) {
t.Errorf("Clone(%v) = %v", m, c)
}
}
var mergeTests = []struct {
src, dst, want proto.Message
}{
{
src: &pb.MyMessage{
Count: proto.Int32(42),
},
dst: &pb.MyMessage{
Name: proto.String("Dave"),
},
want: &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
},
},
{
src: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
},
Pet: []string{"horsey"},
Others: []*pb.OtherMessage{
{
Value: []byte("some bytes"),
},
},
},
dst: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
},
{
// Explicitly test a src=nil field
Inner: nil,
},
},
},
want: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty", "horsey"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
},
{},
{
Value: []byte("some bytes"),
},
},
},
},
{
src: &pb.MyMessage{
RepBytes: [][]byte{[]byte("wow")},
},
dst: &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham")},
},
want: &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
},
},
// Check that a scalar bytes field replaces rather than appends.
{
src: &pb.OtherMessage{Value: []byte("foo")},
dst: &pb.OtherMessage{Value: []byte("bar")},
want: &pb.OtherMessage{Value: []byte("foo")},
},
{
src: &pb.MessageWithMap{
NameMapping: map[int32]string{6: "Nigel"},
MsgMapping: map[int64]*pb.FloatingPoint{
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
},
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
},
dst: &pb.MessageWithMap{
NameMapping: map[int32]string{
6: "Bruce", // should be overwritten
7: "Andrew",
},
},
want: &pb.MessageWithMap{
NameMapping: map[int32]string{
6: "Nigel",
7: "Andrew",
},
MsgMapping: map[int64]*pb.FloatingPoint{
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
},
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
},
},
}
func TestMerge(t *testing.T) {
for _, m := range mergeTests {
got := proto.Clone(m.dst)
proto.Merge(got, m.src)
if !proto.Equal(got, m.want) {
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
}
}
}
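// Hedged sketch, not part of the diff: the proto3 zero-value rule that the new
// viaPtr/isProto3Zero logic in clone.go implements. proto3pb refers to the
// generated proto3_proto package added later in this change; the field names
// come from its Message type.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	dst := &proto3pb.Message{Name: "Aoife", HeightInCm: 180}
	src := &proto3pb.Message{Name: "", HeightInCm: 170} // Name holds the proto3 zero value

	proto.Merge(dst, src)
	// The zero-valued Name in src is skipped, so dst keeps "Aoife";
	// the non-zero HeightInCm is merged and overwrites 180.
	fmt.Println(dst.Name, dst.HeightInCm) // Aoife 170
}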

View File

@ -46,6 +46,10 @@ import (
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error {
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
@ -377,6 +399,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
@ -561,9 +597,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for i := 0; i < nb; i++ {
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
@ -675,7 +715,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
@ -727,8 +767,14 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyptr.Elem(), valptr.Elem())
v.SetMapIndex(keyelem, valelem)
return nil
}

View File

@ -228,6 +228,20 @@ func Marshal(pb Message) ([]byte, error) {
return p.buf, err
}
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
t, base, err := getbase(pb)
if structPointer_IsNil(base) {
return ErrNil
}
if err == nil {
var state errorState
err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
}
return err
}
// Marshal takes the protocol buffer
// and encodes it into the wire format, writing the result to the
// Buffer.
@ -318,7 +332,7 @@ func size_bool(p *Properties, base structPointer) int {
func size_proto3_bool(p *Properties, base structPointer) int {
v := *structPointer_BoolVal(base, p.field)
if !v {
if !v && !p.oneof {
return 0
}
return len(p.tagcode) + 1 // each bool takes exactly one byte
@ -361,7 +375,7 @@ func size_int32(p *Properties, base structPointer) (n int) {
func size_proto3_int32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
if x == 0 {
if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -407,7 +421,7 @@ func size_uint32(p *Properties, base structPointer) (n int) {
func size_proto3_uint32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := word32Val_Get(v)
if x == 0 {
if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -452,7 +466,7 @@ func size_int64(p *Properties, base structPointer) (n int) {
func size_proto3_int64(p *Properties, base structPointer) (n int) {
v := structPointer_Word64Val(base, p.field)
x := word64Val_Get(v)
if x == 0 {
if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -495,7 +509,7 @@ func size_string(p *Properties, base structPointer) (n int) {
func size_proto3_string(p *Properties, base structPointer) (n int) {
v := *structPointer_StringVal(base, p.field)
if v == "" {
if v == "" && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -529,7 +543,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
return state.err
}
o.buf = append(o.buf, p.tagcode...)
@ -667,7 +681,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error
func size_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
if s == nil {
if s == nil && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -677,7 +691,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) {
func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
if len(s) == 0 {
if len(s) == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@ -1084,7 +1098,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
repeated MapFieldEntry map_field = N;
*/
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
if v.Len() == 0 {
return nil
}
@ -1106,6 +1120,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
for _, key := range keys {
val := v.MapIndex(key)
// The only illegal map entry values are nil message pointers.
if val.Kind() == reflect.Ptr && val.IsNil() {
return errors.New("proto: map has nil element")
}
keycopy.Set(key)
valcopy.Set(val)
@ -1118,7 +1137,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
}
func size_new_map(p *Properties, base structPointer) int {
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
@ -1196,6 +1215,14 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
}
}
// Do oneof fields.
if prop.oneofMarshaler != nil {
m := structPointer_Interface(base, prop.stype).(Message)
if err := prop.oneofMarshaler(m, o); err != nil {
return err
}
}
// Add unrecognized fields at the end.
if prop.unrecField.IsValid() {
v := *structPointer_Bytes(base, prop.unrecField)
@ -1221,6 +1248,27 @@ func size_struct(prop *StructProperties, base structPointer) (n int) {
n += len(v)
}
// Factor in any oneof fields.
// TODO: This could be faster and use less reflection.
if prop.oneofMarshaler != nil {
sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
for i := 0; i < prop.stype.NumField(); i++ {
fv := sv.Field(i)
if fv.Kind() != reflect.Interface || fv.IsNil() {
continue
}
if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
continue
}
spv := fv.Elem() // interface -> *T
sv := spv.Elem() // *T -> T
sf := sv.Type().Field(0) // StructField inside T
var prop Properties
prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
n += prop.size(&prop, toStructPointer(spv))
}
}
return
}
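// Hedged sketch, not part of the diff: round-tripping a message through the
// new count-delimited Buffer helpers added in this change (DecodeMessage in
// decode.go, EncodeMessage above). pb.MyMessage is the existing testdata type.
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	in := &pb.MyMessage{Count: proto.Int32(7)}

	buf := proto.NewBuffer(nil)
	if err := buf.EncodeMessage(in); err != nil { // varint length prefix, then the payload
		log.Fatal(err)
	}

	out := new(pb.MyMessage)
	if err := proto.NewBuffer(buf.Bytes()).DecodeMessage(out); err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped Count = %d", out.GetCount())
}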

View File

@ -154,6 +154,17 @@ func equalAny(v1, v2 reflect.Value) bool {
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2)
case reflect.Map:
if v1.Len() != v2.Len() {
return false

View File

@ -1,191 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
. "github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/testdata"
)
// Four identical base messages.
// The init function adds extensions to some of them.
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
// Two messages with non-message extensions.
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
func init() {
ext1 := &pb.Ext{Data: String("Kirk")}
ext2 := &pb.Ext{Data: String("Picard")}
// messageWithExtension1a has ext1, but never marshals it.
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
panic("SetExtension on 1a failed: " + err.Error())
}
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
panic("SetExtension on 1b failed: " + err.Error())
}
buf, err := Marshal(messageWithExtension1b)
if err != nil {
panic("Marshal of 1b failed: " + err.Error())
}
messageWithExtension1b.Reset()
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
panic("Unmarshal of 1b failed: " + err.Error())
}
// messageWithExtension2 has ext2.
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
panic("SetExtension on 2 failed: " + err.Error())
}
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
panic("SetExtension on Int32-1 failed: " + err.Error())
}
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
panic("SetExtension on Int32-2 failed: " + err.Error())
}
}
var EqualTests = []struct {
desc string
a, b Message
exp bool
}{
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
{"nil vs nil", nil, nil, true},
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
{
"nested, different",
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
false,
},
{
"nested, equal",
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
true,
},
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
{
"repeated bytes",
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
true,
},
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
{
"message with group",
&pb.MyMessage{
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},
},
&pb.MyMessage{
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},
},
true,
},
{
"map same",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
true,
},
{
"map different entry",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
false,
},
{
"map different key only",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
false,
},
{
"map different value only",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
false,
},
}
func TestEqual(t *testing.T) {
for _, tc := range EqualTests {
if res := Equal(tc.a, tc.b); res != tc.exp {
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
}
}
}

View File

@ -222,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
@ -231,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
return nil, ErrMissingExtension
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
@ -258,6 +261,41 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
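// Hedged sketch, not part of the diff: the new default-value behaviour of
// GetExtension. pb.E_Ext_More (used by the tests in this package) declares no
// default, so a missing extension still yields ErrMissingExtension; an
// extension that declares a default in its .proto would return that default
// here instead of an error.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(1)}
	v, err := proto.GetExtension(msg, pb.E_Ext_More)
	switch {
	case err == proto.ErrMissingExtension:
		fmt.Println("extension not set and no default declared")
	case err != nil:
		fmt.Println("unexpected error:", err)
	default:
		fmt.Printf("extension value (or its declared default): %+v\n", v)
	}
}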

View File

@ -1,153 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/testdata"
)
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
msg := &pb.MyMessage{}
ext1 := &pb.Ext{}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
t.Fatalf("Could not set ext1: %s", ext1)
}
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
pb.E_Ext_More,
pb.E_Ext_Text,
})
if err != nil {
t.Fatalf("GetExtensions() failed: %s", err)
}
if exts[0] != ext1 {
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
}
if exts[1] != nil {
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
}
}
func TestGetExtensionStability(t *testing.T) {
check := func(m *pb.MyMessage) bool {
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
if err != nil {
t.Fatalf("GetExtension() failed: %s", err)
}
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
if err != nil {
t.Fatalf("GetExtension() failed: %s", err)
}
return ext1 == ext2
}
msg := &pb.MyMessage{Count: proto.Int32(4)}
ext0 := &pb.Ext{}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
t.Fatalf("Could not set ext1: %s", ext0)
}
if !check(msg) {
t.Errorf("GetExtension() not stable before marshaling")
}
bb, err := proto.Marshal(msg)
if err != nil {
t.Fatalf("Marshal() failed: %s", err)
}
msg1 := &pb.MyMessage{}
err = proto.Unmarshal(bb, msg1)
if err != nil {
t.Fatalf("Unmarshal() failed: %s", err)
}
if !check(msg1) {
t.Errorf("GetExtension() not stable after unmarshaling")
}
}
func TestExtensionsRoundTrip(t *testing.T) {
msg := &pb.MyMessage{}
ext1 := &pb.Ext{
Data: proto.String("hi"),
}
ext2 := &pb.Ext{
Data: proto.String("there"),
}
exists := proto.HasExtension(msg, pb.E_Ext_More)
if exists {
t.Error("Extension More present unexpectedly")
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
t.Error(err)
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
t.Error(err)
}
e, err := proto.GetExtension(msg, pb.E_Ext_More)
if err != nil {
t.Error(err)
}
x, ok := e.(*pb.Ext)
if !ok {
t.Errorf("e has type %T, expected testdata.Ext", e)
} else if *x.Data != "there" {
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
}
proto.ClearExtension(msg, pb.E_Ext_More)
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
t.Errorf("got %v, expected ErrMissingExtension", e)
}
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
t.Error("expected bad extension error, got nil")
}
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
t.Error("expected extension err")
}
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
t.Error("expected some sort of type mismatch error, got nil")
}
}
func TestNilExtension(t *testing.T) {
msg := &pb.MyMessage{
Count: proto.Int32(1),
}
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
t.Fatal(err)
}
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
t.Error("expected SetExtension to fail due to a nil extension")
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
t.Errorf("expected error %v, got %v", want, err)
}
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
}

View File

@ -66,6 +66,8 @@
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
The simplest way to describe this is to see an example.
@ -82,6 +84,10 @@
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
@ -124,11 +130,36 @@
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
@ -165,6 +196,20 @@
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
@ -187,6 +232,7 @@
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
@ -201,6 +247,11 @@
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
@ -211,6 +262,7 @@ import (
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
@ -385,13 +437,13 @@ func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32,
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (o *Buffer) DebugPrint(s string, b []byte) {
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := o.buf
index := o.index
o.buf = b
o.index = 0
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
@ -402,12 +454,12 @@ out:
fmt.Print(" ")
}
index := o.index
if index == len(o.buf) {
index := p.index
if index == len(p.buf) {
break
}
op, err := o.DecodeVarint()
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
@ -424,7 +476,7 @@ out:
case WireBytes:
var r []byte
r, err = o.DecodeRawBytes(false)
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
@ -445,7 +497,7 @@ out:
fmt.Printf("\n")
case WireFixed32:
u, err = o.DecodeFixed32()
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
@ -453,16 +505,15 @@ out:
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = o.DecodeFixed64()
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
break
case WireVarint:
u, err = o.DecodeVarint()
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
@ -470,30 +521,22 @@ out:
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
if err != nil {
fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
if err != nil {
fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
o.buf = obuf
o.index = index
p.buf = obuf
p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
@ -668,7 +711,27 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
}
ft := t.Field(fi).Type
var canHaveDefault, nestedMessage bool
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field cannot have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
@ -693,20 +756,17 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
if !canHaveDefault {
if nestedMessage {
dm.nested = append(dm.nested, fi)
return nil, true, nil
}
continue
return nil, false, nil
}
sf := scalarField{
index: fi,
kind: ft.Elem().Kind(),
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
dm.scalars = append(dm.scalars, sf)
continue
return sf, false, nil
}
// a scalar field: either *T or []byte
@ -714,36 +774,31 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
@ -754,26 +809,20 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
continue
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
continue
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
dm.scalars = append(dm.scalars, sf)
}
return dm
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
@ -781,10 +830,54 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
type mapKeys []reflect.Value
func (s mapKeys) Len() int { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool {
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
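// Hedged sketch, not part of the diff: Buffer.DebugPrint (whose receiver was
// renamed from o to p above, with no behaviour change) dumps encoded bytes in
// a human-readable form; handy for checking what the encoder produced.
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	data, err := proto.Marshal(&pb.MyMessage{Count: proto.Int32(4), Name: proto.String("Dave")})
	if err != nil {
		log.Fatal(err)
	}
	new(proto.Buffer).DebugPrint("MyMessage", data) // prints tag, wire type and value per field
}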

View File

@ -1,66 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"bytes"
"testing"
)
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
// Check that a repeated message set entry will be concatenated.
in := &MessageSet{
Item: []*_MessageSet_Item{
{TypeId: Int32(12345), Message: []byte("hoo")},
{TypeId: Int32(12345), Message: []byte("hah")},
},
}
b, err := Marshal(in)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
t.Logf("Marshaled bytes: %q", b)
m := make(map[int32]Extension)
if err := UnmarshalMessageSet(b, m); err != nil {
t.Fatalf("UnmarshalMessageSet: %v", err)
}
ext, ok := m[12345]
if !ok {
t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
}
// Skip wire type/field number and length varints.
got := skipVarint(skipVarint(ext.enc))
if want := []byte("hoohah"); !bytes.Equal(got, want) {
t.Errorf("Combined extension is %q, want %q", got, want)
}
}

View File

@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}

View File

@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

View File

@ -84,6 +84,12 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@ -132,6 +138,21 @@ type StructProperties struct {
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
@ -156,6 +177,7 @@ type Properties struct {
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
@ -208,6 +230,9 @@ func (p *Properties) String() string {
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
@ -284,6 +309,8 @@ func (p *Properties) Parse(s string) {
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
@ -665,6 +692,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
@ -674,7 +702,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
@ -682,6 +710,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0

View File

@ -0,0 +1,122 @@
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!
/*
Package proto3_proto is a generated protocol buffer package.
It is generated from these files:
proto3_proto/proto3.proto
It has these top-level messages:
Message
Nested
MessageWithMap
*/
package proto3_proto
import proto "github.com/golang/protobuf/proto"
import testdata "github.com/golang/protobuf/proto/testdata"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *testdata.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
func init() {
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
}
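A brief usage sketch of the generated proto3 types above, relying only on what is visible in this file (the Message struct, the Message_PUNS constant, the GetTerrain accessor, and the registered enum names):
package main
import (
	"fmt"
	pb "github.com/golang/protobuf/proto/proto3_proto"
)
func main() {
	m := &pb.Message{
		Name:     "Rob",
		Hilarity: pb.Message_PUNS,
		Terrain:  map[string]*pb.Nested{"meadow": {Bunny: "Monty"}},
	}
	fmt.Println(m.Hilarity)                     // PUNS, via the registered enum names
	fmt.Println(m.GetTerrain()["meadow"].Bunny) // Monty
}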

View File

@ -1,125 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/proto3_proto"
tpb "github.com/golang/protobuf/proto/testdata"
)
func TestProto3ZeroValues(t *testing.T) {
tests := []struct {
desc string
m proto.Message
}{
{"zero message", &pb.Message{}},
{"empty bytes field", &pb.Message{Data: []byte{}}},
}
for _, test := range tests {
b, err := proto.Marshal(test.m)
if err != nil {
t.Errorf("%s: proto.Marshal: %v", test.desc, err)
continue
}
if len(b) > 0 {
t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
}
}
}
func TestRoundTripProto3(t *testing.T) {
m := &pb.Message{
Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
Key: []uint64{1, 0xdeadbeef},
Nested: &pb.Nested{
Bunny: "Monty",
},
}
t.Logf(" m: %v", m)
b, err := proto.Marshal(m)
if err != nil {
t.Fatalf("proto.Marshal: %v", err)
}
t.Logf(" b: %q", b)
m2 := new(pb.Message)
if err := proto.Unmarshal(b, m2); err != nil {
t.Fatalf("proto.Unmarshal: %v", err)
}
t.Logf("m2: %v", m2)
if !proto.Equal(m, m2) {
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
}
}
func TestProto3SetDefaults(t *testing.T) {
in := &pb.Message{
Terrain: map[string]*pb.Nested{
"meadow": new(pb.Nested),
},
Proto2Field: new(tpb.SubDefaults),
Proto2Value: map[string]*tpb.SubDefaults{
"badlands": new(tpb.SubDefaults),
},
}
got := proto.Clone(in).(*pb.Message)
proto.SetDefaults(got)
// There are no defaults in proto3. Everything should be the zero value, but
// we need to remember to set defaults for nested proto2 messages.
want := &pb.Message{
Terrain: map[string]*pb.Nested{
"meadow": new(pb.Nested),
},
Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
Proto2Value: map[string]*tpb.SubDefaults{
"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
},
}
if !proto.Equal(got, want) {
t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
}
}

View File

@ -1,142 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"log"
"strings"
"testing"
. "github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
pb "github.com/golang/protobuf/proto/testdata"
)
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
// messageWithExtension2 is in equal_test.go.
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
func init() {
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
log.Panicf("SetExtension: %v", err)
}
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
log.Panicf("SetExtension: %v", err)
}
// Force messageWithExtension3 to have the extension encoded.
Marshal(messageWithExtension3)
}
var SizeTests = []struct {
desc string
pb Message
}{
{"empty", &pb.OtherMessage{}},
// Basic types.
{"bool", &pb.Defaults{F_Bool: Bool(true)}},
{"int32", &pb.Defaults{F_Int32: Int32(12)}},
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
{"float", &pb.Defaults{F_Float: Float32(12.6)}},
{"double", &pb.Defaults{F_Double: Float64(13.9)}},
{"string", &pb.Defaults{F_String: String("niles")}},
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
// Repeated.
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
// Need enough large numbers to verify that the header is counting the number of bytes
// for the field, not the number of elements (see the worked byte count after this list).
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
}}},
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
// Nested.
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
// Other things.
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
{"extension (unencoded)", messageWithExtension1},
{"extension (encoded)", messageWithExtension3},
// proto3 message
{"proto3 empty", &proto3pb.Message{}},
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
{"proto3 float", &proto3pb.Message{Score: 12.6}},
{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
}
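A hand-worked sketch of the byte-counting point from the packed int64 case above (the totals are my own arithmetic, assuming standard varint encoding, which matches the proto wire format for these non-negative values): each 1<<62 takes 9 varint bytes, so 20 values give a 180-byte payload, and the field header is one tag byte plus a 2-byte varint length of 180, i.e. the length prefix counts bytes, not the 20 elements.
package main
import (
	"encoding/binary"
	"fmt"
)
func main() {
	// Payload bytes for 20 copies of 1<<62, each a 9-byte varint.
	payload := 0
	var buf [binary.MaxVarintLen64]byte
	for i := 0; i < 20; i++ {
		payload += binary.PutUvarint(buf[:], uint64(1)<<62)
	}
	// Field header: one tag byte plus the varint encoding of the byte length.
	header := 1 + binary.PutUvarint(buf[:], uint64(payload))
	fmt.Println(payload, header, payload+header) // 180 3 183
}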
func TestSize(t *testing.T) {
for _, tc := range SizeTests {
size := Size(tc.pb)
b, err := Marshal(tc.pb)
if err != nil {
t.Errorf("%v: Marshal failed: %v", tc.desc, err)
continue
}
if size != len(b) {
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
t.Logf("%v: bytes: %#v", tc.desc, b)
}
}
}

View File

@ -1,50 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include ../../Make.protobuf
all: regenerate
regenerate:
rm -f test.pb.go
make test.pb.go
# The following rules are just aids to development. Not needed for typical testing.
diff: regenerate
git diff test.pb.go
restore:
cp test.pb.go.golden test.pb.go
preserve:
cp test.pb.go test.pb.go.golden

View File

@ -1,86 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Verify that the compiler output for test.proto is unchanged.
package testdata
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
)
// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
func sum(t *testing.T, name string) string {
data, err := ioutil.ReadFile(name)
if err != nil {
t.Fatal(err)
}
t.Logf("sum(%q): length is %d", name, len(data))
hash := sha1.New()
_, err = hash.Write(data)
if err != nil {
t.Fatal(err)
}
return fmt.Sprintf("% x", hash.Sum(nil))
}
func run(t *testing.T, name string, args ...string) {
cmd := exec.Command(name, args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
t.Fatal(err)
}
}
func TestGolden(t *testing.T) {
// Compute the original checksum.
goldenSum := sum(t, "test.pb.go")
// Run the proto compiler.
run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
newFile := filepath.Join(os.TempDir(), "test.pb.go")
defer os.Remove(newFile)
// Compute the new checksum.
newSum := sum(t, newFile)
// Verify
if newSum != goldenSum {
run(t, "diff", "-u", "test.pb.go", newFile)
t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
}
}

File diff suppressed because it is too large

View File

@ -1,435 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// A feature-rich test file for the protocol compiler and libraries.
syntax = "proto2";
package testdata;
enum FOO { FOO1 = 1; };
message GoEnum {
required FOO foo = 1;
}
message GoTestField {
required string Label = 1;
required string Type = 2;
}
message GoTest {
// An enum, for completeness.
enum KIND {
VOID = 0;
// Basic types
BOOL = 1;
BYTES = 2;
FINGERPRINT = 3;
FLOAT = 4;
INT = 5;
STRING = 6;
TIME = 7;
// Groupings
TUPLE = 8;
ARRAY = 9;
MAP = 10;
// Table types
TABLE = 11;
// Functions
FUNCTION = 12; // last tag
};
// Some typical parameters
required KIND Kind = 1;
optional string Table = 2;
optional int32 Param = 3;
// Required, repeated and optional foreign fields.
required GoTestField RequiredField = 4;
repeated GoTestField RepeatedField = 5;
optional GoTestField OptionalField = 6;
// Required fields of all basic types
required bool F_Bool_required = 10;
required int32 F_Int32_required = 11;
required int64 F_Int64_required = 12;
required fixed32 F_Fixed32_required = 13;
required fixed64 F_Fixed64_required = 14;
required uint32 F_Uint32_required = 15;
required uint64 F_Uint64_required = 16;
required float F_Float_required = 17;
required double F_Double_required = 18;
required string F_String_required = 19;
required bytes F_Bytes_required = 101;
required sint32 F_Sint32_required = 102;
required sint64 F_Sint64_required = 103;
// Repeated fields of all basic types
repeated bool F_Bool_repeated = 20;
repeated int32 F_Int32_repeated = 21;
repeated int64 F_Int64_repeated = 22;
repeated fixed32 F_Fixed32_repeated = 23;
repeated fixed64 F_Fixed64_repeated = 24;
repeated uint32 F_Uint32_repeated = 25;
repeated uint64 F_Uint64_repeated = 26;
repeated float F_Float_repeated = 27;
repeated double F_Double_repeated = 28;
repeated string F_String_repeated = 29;
repeated bytes F_Bytes_repeated = 201;
repeated sint32 F_Sint32_repeated = 202;
repeated sint64 F_Sint64_repeated = 203;
// Optional fields of all basic types
optional bool F_Bool_optional = 30;
optional int32 F_Int32_optional = 31;
optional int64 F_Int64_optional = 32;
optional fixed32 F_Fixed32_optional = 33;
optional fixed64 F_Fixed64_optional = 34;
optional uint32 F_Uint32_optional = 35;
optional uint64 F_Uint64_optional = 36;
optional float F_Float_optional = 37;
optional double F_Double_optional = 38;
optional string F_String_optional = 39;
optional bytes F_Bytes_optional = 301;
optional sint32 F_Sint32_optional = 302;
optional sint64 F_Sint64_optional = 303;
// Default-valued fields of all basic types
optional bool F_Bool_defaulted = 40 [default=true];
optional int32 F_Int32_defaulted = 41 [default=32];
optional int64 F_Int64_defaulted = 42 [default=64];
optional fixed32 F_Fixed32_defaulted = 43 [default=320];
optional fixed64 F_Fixed64_defaulted = 44 [default=640];
optional uint32 F_Uint32_defaulted = 45 [default=3200];
optional uint64 F_Uint64_defaulted = 46 [default=6400];
optional float F_Float_defaulted = 47 [default=314159.];
optional double F_Double_defaulted = 48 [default=271828.];
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
optional sint32 F_Sint32_defaulted = 402 [default = -32];
optional sint64 F_Sint64_defaulted = 403 [default = -64];
// Packed repeated fields (no string or bytes).
repeated bool F_Bool_repeated_packed = 50 [packed=true];
repeated int32 F_Int32_repeated_packed = 51 [packed=true];
repeated int64 F_Int64_repeated_packed = 52 [packed=true];
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
repeated float F_Float_repeated_packed = 57 [packed=true];
repeated double F_Double_repeated_packed = 58 [packed=true];
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
// Required, repeated, and optional groups.
required group RequiredGroup = 70 {
required string RequiredField = 71;
};
repeated group RepeatedGroup = 80 {
required string RequiredField = 81;
};
optional group OptionalGroup = 90 {
required string RequiredField = 91;
};
}
// For testing skipping of unrecognized fields.
// Numbers are all big, larger than tag numbers in GoTestField,
// the message used in the corresponding test.
message GoSkipTest {
required int32 skip_int32 = 11;
required fixed32 skip_fixed32 = 12;
required fixed64 skip_fixed64 = 13;
required string skip_string = 14;
required group SkipGroup = 15 {
required int32 group_int32 = 16;
required string group_string = 17;
}
}
// For testing packed/non-packed decoder switching.
// A serialized instance of one should be deserializable as the other.
message NonPackedTest {
repeated int32 a = 1;
}
message PackedTest {
repeated int32 b = 1 [packed=true];
}
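A minimal sketch of the packed/non-packed compatibility described in the comment above, assuming the usual protoc-gen-go field names (A on NonPackedTest, B on PackedTest) for the generated testdata types:
package main
import (
	"fmt"
	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)
func main() {
	// Marshal the packed form, then decode the same bytes as the non-packed message.
	b, err := proto.Marshal(&pb.PackedTest{B: []int32{1, 2, 3}})
	if err != nil {
		panic(err)
	}
	np := new(pb.NonPackedTest)
	if err := proto.Unmarshal(b, np); err != nil {
		panic(err)
	}
	fmt.Println(np.A) // [1 2 3]
}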
message MaxTag {
// Maximum possible tag number.
optional string last_field = 536870911;
}
message OldMessage {
message Nested {
optional string name = 1;
}
optional Nested nested = 1;
optional int32 num = 2;
}
// NewMessage is wire compatible with OldMessage;
// imagine it as a future version.
message NewMessage {
message Nested {
optional string name = 1;
optional string food_group = 2;
}
optional Nested nested = 1;
// This is an int32 in OldMessage.
optional int64 num = 2;
}
// Smaller tests for ASCII formatting.
message InnerMessage {
required string host = 1;
optional int32 port = 2 [default=4000];
optional bool connected = 3;
}
message OtherMessage {
optional int64 key = 1;
optional bytes value = 2;
optional float weight = 3;
optional InnerMessage inner = 4;
}
message MyMessage {
required int32 count = 1;
optional string name = 2;
optional string quote = 3;
repeated string pet = 4;
optional InnerMessage inner = 5;
repeated OtherMessage others = 6;
repeated InnerMessage rep_inner = 12;
enum Color {
RED = 0;
GREEN = 1;
BLUE = 2;
};
optional Color bikeshed = 7;
optional group SomeGroup = 8 {
optional int32 group_field = 9;
}
// This field becomes [][]byte in the generated code.
repeated bytes rep_bytes = 10;
optional double bigfloat = 11;
extensions 100 to max;
}
message Ext {
extend MyMessage {
optional Ext more = 103;
optional string text = 104;
optional int32 number = 105;
}
optional string data = 1;
}
extend MyMessage {
repeated string greeting = 106;
}
message MyMessageSet {
option message_set_wire_format = true;
extensions 100 to max;
}
message Empty {
}
extend MyMessageSet {
optional Empty x201 = 201;
optional Empty x202 = 202;
optional Empty x203 = 203;
optional Empty x204 = 204;
optional Empty x205 = 205;
optional Empty x206 = 206;
optional Empty x207 = 207;
optional Empty x208 = 208;
optional Empty x209 = 209;
optional Empty x210 = 210;
optional Empty x211 = 211;
optional Empty x212 = 212;
optional Empty x213 = 213;
optional Empty x214 = 214;
optional Empty x215 = 215;
optional Empty x216 = 216;
optional Empty x217 = 217;
optional Empty x218 = 218;
optional Empty x219 = 219;
optional Empty x220 = 220;
optional Empty x221 = 221;
optional Empty x222 = 222;
optional Empty x223 = 223;
optional Empty x224 = 224;
optional Empty x225 = 225;
optional Empty x226 = 226;
optional Empty x227 = 227;
optional Empty x228 = 228;
optional Empty x229 = 229;
optional Empty x230 = 230;
optional Empty x231 = 231;
optional Empty x232 = 232;
optional Empty x233 = 233;
optional Empty x234 = 234;
optional Empty x235 = 235;
optional Empty x236 = 236;
optional Empty x237 = 237;
optional Empty x238 = 238;
optional Empty x239 = 239;
optional Empty x240 = 240;
optional Empty x241 = 241;
optional Empty x242 = 242;
optional Empty x243 = 243;
optional Empty x244 = 244;
optional Empty x245 = 245;
optional Empty x246 = 246;
optional Empty x247 = 247;
optional Empty x248 = 248;
optional Empty x249 = 249;
optional Empty x250 = 250;
}
message MessageList {
repeated group Message = 1 {
required string name = 2;
required int32 count = 3;
}
}
message Strings {
optional string string_field = 1;
optional bytes bytes_field = 2;
}
message Defaults {
enum Color {
RED = 0;
GREEN = 1;
BLUE = 2;
}
// Default-valued fields of all basic types.
// Same as GoTest, but copied here to make testing easier.
optional bool F_Bool = 1 [default=true];
optional int32 F_Int32 = 2 [default=32];
optional int64 F_Int64 = 3 [default=64];
optional fixed32 F_Fixed32 = 4 [default=320];
optional fixed64 F_Fixed64 = 5 [default=640];
optional uint32 F_Uint32 = 6 [default=3200];
optional uint64 F_Uint64 = 7 [default=6400];
optional float F_Float = 8 [default=314159.];
optional double F_Double = 9 [default=271828.];
optional string F_String = 10 [default="hello, \"world!\"\n"];
optional bytes F_Bytes = 11 [default="Bignose"];
optional sint32 F_Sint32 = 12 [default=-32];
optional sint64 F_Sint64 = 13 [default=-64];
optional Color F_Enum = 14 [default=GREEN];
// More fields with crazy defaults.
optional float F_Pinf = 15 [default=inf];
optional float F_Ninf = 16 [default=-inf];
optional float F_Nan = 17 [default=nan];
// Sub-message.
optional SubDefaults sub = 18;
// Redundant but explicit defaults.
optional string str_zero = 19 [default=""];
}
message SubDefaults {
optional int64 n = 1 [default=7];
}
message RepeatedEnum {
enum Color {
RED = 1;
}
repeated Color color = 1;
}
message MoreRepeated {
repeated bool bools = 1;
repeated bool bools_packed = 2 [packed=true];
repeated int32 ints = 3;
repeated int32 ints_packed = 4 [packed=true];
repeated int64 int64s_packed = 7 [packed=true];
repeated string strings = 5;
repeated fixed32 fixeds = 6;
}
// GroupOld and GroupNew have the same wire format.
// GroupNew has a new field inside a group.
message GroupOld {
optional group G = 101 {
optional int32 x = 2;
}
}
message GroupNew {
optional group G = 101 {
optional int32 x = 2;
optional int32 y = 3;
}
}
message FloatingPoint {
required double f = 1;
}
message MessageWithMap {
map<int32, string> name_mapping = 1;
map<sint64, FloatingPoint> msg_mapping = 2;
map<bool, bytes> byte_mapping = 3;
map<string, string> str_to_str = 4;
}

View File

@ -37,11 +37,11 @@ import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"os"
"reflect"
"sort"
"strings"
@ -246,7 +246,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys() // TODO: should we sort these for deterministic output?
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
@ -283,6 +283,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
@ -298,6 +300,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
@ -315,26 +318,34 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
switch fv.Kind() {
case reflect.Bool:
if !fv.Bool() {
if isProto3Zero(fv) {
continue
}
case reflect.Int32, reflect.Int64:
if fv.Int() == 0 {
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
case reflect.Uint32, reflect.Uint64:
if fv.Uint() == 0 {
continue
}
case reflect.Float32, reflect.Float64:
if fv.Float() == 0 {
continue
}
case reflect.String:
if fv.String() == "" {
continue
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
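A small reflection sketch of the unwrapping done in the oneof branch above, using hypothetical generated types (Msg, Msg_Name); it mirrors the interface -> *T -> T step and the protobuf tag parse:
package main
import (
	"fmt"
	"reflect"
)
type isMsgValue interface{ isMsgValue() }
type Msg struct {
	Value isMsgValue `protobuf_oneof:"value"`
}
type Msg_Name struct {
	Name string `protobuf:"bytes,4,opt,name=name"`
}
func (*Msg_Name) isMsgValue() {}
func main() {
	m := Msg{Value: &Msg_Name{Name: "Ada"}}
	fv := reflect.ValueOf(m).Field(0) // the interface-typed oneof field
	if fv.IsNil() {
		return // nothing set in the oneof; the marshaler skips it
	}
	inner := fv.Elem().Elem() // interface -> *Msg_Name -> Msg_Name
	tag := inner.Type().Field(0).Tag.Get("protobuf")
	fmt.Println(inner.Field(0).Interface(), tag) // Ada bytes,4,opt,name=name
}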
@ -666,10 +677,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
pb, err := GetExtension(ep, desc)
if err != nil {
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
return err
}
continue
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.

View File

@ -174,7 +174,7 @@ func (p *textParser) advance() {
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %v", p.s[0:i+1])
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
@ -385,8 +385,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
sprops := GetProperties(st)
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
@ -438,7 +437,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
reqCount := GetProperties(st).reqCount
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
@ -520,15 +520,28 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
} else {
// This is a normal, non-extension field.
name := tok.value
fi, props, ok := structFieldByName(st, name)
if !ok {
return p.errorf("unknown field name %q in %v", name, st)
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
dst := sv.Field(fi)
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
@ -567,6 +580,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
@ -576,6 +592,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
@ -589,7 +608,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
@ -603,16 +622,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
} else if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
// For backward compatibility, permit a semicolon or comma after a field.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
}
if reqCount > 0 {
@ -621,6 +635,19 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
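A short usage sketch of the separator handling added above: both ';' and ',' after a field are consumed and ignored. It assumes the generated testdata.MyMessage getters (GetCount, GetName, GetPet):
package main
import (
	"fmt"
	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)
func main() {
	m := new(pb.MyMessage)
	// Semicolons and commas after fields are both tolerated.
	if err := proto.UnmarshalText(`count:3; name:"Calvin", pet:"hobbes"`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetCount(), m.GetName(), m.GetPet()) // 3 Calvin [hobbes]
}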
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {

View File

@ -1,511 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"math"
"reflect"
"testing"
. "github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
. "github.com/golang/protobuf/proto/testdata"
)
type UnmarshalTextTest struct {
in string
err string // if "", no error expected
out *MyMessage
}
func buildExtStructTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_More, &Ext{
Data: String("Hello, world!"),
})
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtDataTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
SetExtension(msg, E_Ext_Number, Int32(1729))
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtRepStringTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
panic(err)
}
return UnmarshalTextTest{in: text, out: msg}
}
var unMarshalTextTests = []UnmarshalTextTest{
// Basic
{
in: " count:42\n name:\"Dave\" ",
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
},
},
// Empty quoted string
{
in: `count:42 name:""`,
out: &MyMessage{
Count: Int32(42),
Name: String(""),
},
},
// Quoted string concatenation
{
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string with escaped apostrophe
{
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
out: &MyMessage{
Count: Int32(42),
Name: String("HOLIDAY - New Year's Day"),
},
},
// Quoted string with single quote
{
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
out: &MyMessage{
Count: Int32(42),
Name: String(`Roger "The Ramster" Ramjet`),
},
},
// Quoted string with all the accepted special characters from the C++ test
{
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
out: &MyMessage{
Count: Int32(42),
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
},
},
// Quoted string with quoted backslash
{
in: `count:42 name: "\\'xyz"`,
out: &MyMessage{
Count: Int32(42),
Name: String(`\'xyz`),
},
},
// Quoted string with UTF-8 bytes.
{
in: "count:42 name: '\303\277\302\201\xAB'",
out: &MyMessage{
Count: Int32(42),
Name: String("\303\277\302\201\xAB"),
},
},
// Bad quoted string
{
in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0"`,
},
// Number too large for int64
{
in: "count: 1 others { key: 123456789012345678901 }",
err: "line 1.23: invalid int64: 123456789012345678901",
},
// Number too large for int32
{
in: "count: 1234567890123",
err: "line 1.7: invalid int32: 1234567890123",
},
// Number in hexadecimal
{
in: "count: 0x2beef",
out: &MyMessage{
Count: Int32(0x2beef),
},
},
// Number in octal
{
in: "count: 024601",
out: &MyMessage{
Count: Int32(024601),
},
},
// Floating point number with "f" suffix
{
in: "count: 4 others:< weight: 17.0f >",
out: &MyMessage{
Count: Int32(4),
Others: []*OtherMessage{
{
Weight: Float32(17),
},
},
},
},
// Floating point positive infinity
{
in: "count: 4 bigfloat: inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(1)),
},
},
// Floating point negative infinity
{
in: "count: 4 bigfloat: -inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(-1)),
},
},
// Number too large for float32
{
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
},
// Number posing as a quoted string
{
in: `inner: < host: 12 >` + "\n",
err: `line 1.15: invalid string: 12`,
},
// Quoted string posing as int32
{
in: `count: "12"`,
err: `line 1.7: invalid int32: "12"`,
},
// Quoted string posing a float32
{
in: `others:< weight: "17.4" >`,
err: `line 1.17: invalid float32: "17.4"`,
},
// Enum
{
in: `count:42 bikeshed: BLUE`,
out: &MyMessage{
Count: Int32(42),
Bikeshed: MyMessage_BLUE.Enum(),
},
},
// Repeated field
{
in: `count:42 pet: "horsey" pet:"bunny"`,
out: &MyMessage{
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
},
},
// Repeated message with/without colon and <>/{}
{
in: `count:42 others:{} others{} others:<> others:{}`,
out: &MyMessage{
Count: Int32(42),
Others: []*OtherMessage{
{},
{},
{},
{},
},
},
},
// Missing colon for inner message
{
in: `count:42 inner < host: "cauchy.syd" >`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("cauchy.syd"),
},
},
},
// Missing colon for string field
{
in: `name "Dave"`,
err: `line 1.5: expected ':', found "\"Dave\""`,
},
// Missing colon for int32 field
{
in: `count 42`,
err: `line 1.6: expected ':', found "42"`,
},
// Missing required field
{
in: `name: "Pawel"`,
err: `proto: required field "testdata.MyMessage.count" not set`,
out: &MyMessage{
Name: String("Pawel"),
},
},
// Repeated non-repeated field
{
in: `name: "Rob" name: "Russ"`,
err: `line 1.12: non-repeated field "name" was repeated`,
},
// Group
{
in: `count: 17 SomeGroup { group_field: 12 }`,
out: &MyMessage{
Count: Int32(17),
Somegroup: &MyMessage_SomeGroup{
GroupField: Int32(12),
},
},
},
// Semicolon between fields
{
in: `count:3;name:"Calvin"`,
out: &MyMessage{
Count: Int32(3),
Name: String("Calvin"),
},
},
// Comma between fields
{
in: `count:4,name:"Ezekiel"`,
out: &MyMessage{
Count: Int32(4),
Name: String("Ezekiel"),
},
},
// Extension
buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
// Big all-in-one
{
in: "count:42 # Meaning\n" +
`name:"Dave" ` +
`quote:"\"I didn't want to go.\"" ` +
`pet:"bunny" ` +
`pet:"kitty" ` +
`pet:"horsey" ` +
`inner:<` +
` host:"footrest.syd" ` +
` port:7001 ` +
` connected:true ` +
`> ` +
`others:<` +
` key:3735928559 ` +
` value:"\x01A\a\f" ` +
`> ` +
`others:<` +
" weight:58.9 # Atomic weight of Co\n" +
` inner:<` +
` host:"lesha.mtv" ` +
` port:8002 ` +
` >` +
`>`,
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
Quote: String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &InnerMessage{
Host: String("footrest.syd"),
Port: Int32(7001),
Connected: Bool(true),
},
Others: []*OtherMessage{
{
Key: Int64(3735928559),
Value: []byte{0x1, 'A', '\a', '\f'},
},
{
Weight: Float32(58.9),
Inner: &InnerMessage{
Host: String("lesha.mtv"),
Port: Int32(8002),
},
},
},
},
},
}
func TestUnmarshalText(t *testing.T) {
for i, test := range unMarshalTextTests {
pb := new(MyMessage)
err := UnmarshalText(test.in, pb)
if test.err == "" {
// We don't expect failure.
if err != nil {
t.Errorf("Test %d: Unexpected error: %v", i, err)
} else if !reflect.DeepEqual(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
} else {
// We do expect failure.
if err == nil {
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
} else if err.Error() != test.err {
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
i, err.Error(), test.err)
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
}
}
}
func TestUnmarshalTextCustomMessage(t *testing.T) {
msg := &textMessage{}
if err := UnmarshalText("custom", msg); err != nil {
t.Errorf("Unexpected error from custom unmarshal: %v", err)
}
if UnmarshalText("not custom", msg) == nil {
t.Errorf("Didn't get expected error from custom unmarshal")
}
}
// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
pb := new(RepeatedEnum)
if err := UnmarshalText("color: RED", pb); err != nil {
t.Fatal(err)
}
exp := &RepeatedEnum{
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
}
if !Equal(pb, exp) {
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
}
}
func TestProto3TextParsing(t *testing.T) {
m := new(proto3pb.Message)
const in = `name: "Wallace" true_scotsman: true`
want := &proto3pb.Message{
Name: "Wallace",
TrueScotsman: true,
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
}
func TestMapParsing(t *testing.T) {
m := new(MessageWithMap)
const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
`msg_mapping:<key:-4 value:<f: 2.0>>` +
`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
`byte_mapping:<key:true value:"so be it">`
want := &MessageWithMap{
NameMapping: map[int32]string{
1: "Beatles",
1234: "Feist",
},
MsgMapping: map[int64]*FloatingPoint{
-4: {F: Float64(2.0)},
-2: {F: Float64(4.0)},
},
ByteMapping: map[bool][]byte{
true: []byte("so be it"),
},
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
}
var benchInput string
func init() {
benchInput = "count: 4\n"
for i := 0; i < 1000; i++ {
benchInput += "pet: \"fido\"\n"
}
// Check it is valid input.
pb := new(MyMessage)
err := UnmarshalText(benchInput, pb)
if err != nil {
panic("Bad benchmark input: " + err.Error())
}
}
func BenchmarkUnmarshalText(b *testing.B) {
pb := new(MyMessage)
for i := 0; i < b.N; i++ {
UnmarshalText(benchInput, pb)
}
b.SetBytes(int64(len(benchInput)))
}

View File

@ -1,436 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"bytes"
"errors"
"io/ioutil"
"math"
"strings"
"testing"
"github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
pb "github.com/golang/protobuf/proto/testdata"
)
// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}
func (*textMessage) MarshalText() ([]byte, error) {
return []byte("custom"), nil
}
func (*textMessage) UnmarshalText(bytes []byte) error {
if string(bytes) != "custom" {
return errors.New("expected 'custom'")
}
return nil
}
func (*textMessage) Reset() {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage() {}
func newTestMessage() *pb.MyMessage {
msg := &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Quote: proto.String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("footrest.syd"),
Port: proto.Int32(7001),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(0xdeadbeef),
Value: []byte{1, 65, 7, 12},
},
{
Weight: proto.Float32(6.022),
Inner: &pb.InnerMessage{
Host: proto.String("lesha.mtv"),
Port: proto.Int32(8002),
},
},
},
Bikeshed: pb.MyMessage_BLUE.Enum(),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(8),
},
// One normally wouldn't do this.
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
XXX_unrecognized: []byte{13<<3 | 0, 4},
}
ext := &pb.Ext{
Data: proto.String("Big gobs for big rats"),
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
panic(err)
}
greetings := []string{"adg", "easy", "cow"}
if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
panic(err)
}
// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
if err != nil {
panic(err)
}
b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
proto.SetRawExtension(msg, 201, b)
// Extensions can be plain fields, too, so let's test that.
b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
proto.SetRawExtension(msg, 202, b)
return msg
}
const text = `count: 42
name: "Dave"
quote: "\"I didn't want to go.\""
pet: "bunny"
pet: "kitty"
pet: "horsey"
inner: <
host: "footrest.syd"
port: 7001
connected: true
>
others: <
key: 3735928559
value: "\001A\007\014"
>
others: <
weight: 6.022
inner: <
host: "lesha.mtv"
port: 8002
>
>
bikeshed: BLUE
SomeGroup {
group_field: 8
}
/* 2 unknown bytes */
13: 4
[testdata.Ext.more]: <
data: "Big gobs for big rats"
>
[testdata.greeting]: "adg"
[testdata.greeting]: "easy"
[testdata.greeting]: "cow"
/* 13 unknown bytes */
201: "\t3G skiing"
/* 3 unknown bytes */
202: 19
`
func TestMarshalText(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, newTestMessage()); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != text {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
}
}
func TestMarshalTextCustomMessage(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != "custom" {
t.Errorf("Got %q, expected %q", s, "custom")
}
}
func TestMarshalTextNil(t *testing.T) {
want := "<nil>"
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
for i, test := range tests {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, test); err != nil {
t.Fatal(err)
}
if got := buf.String(); got != want {
t.Errorf("%d: got %q want %q", i, got, want)
}
}
}
func TestMarshalTextUnknownEnum(t *testing.T) {
// The Color enum only specifies values 0-2.
m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
got := m.String()
const want = `bikeshed:3 `
if got != want {
t.Errorf("\n got %q\nwant %q", got, want)
}
}
func BenchmarkMarshalTextBuffered(b *testing.B) {
buf := new(bytes.Buffer)
m := newTestMessage()
for i := 0; i < b.N; i++ {
buf.Reset()
proto.MarshalText(buf, m)
}
}
func BenchmarkMarshalTextUnbuffered(b *testing.B) {
w := ioutil.Discard
m := newTestMessage()
for i := 0; i < b.N; i++ {
proto.MarshalText(w, m)
}
}
func compact(src string) string {
// s/[ \n]+/ /g; s/ $//;
dst := make([]byte, len(src))
space, comment := false, false
j := 0
for i := 0; i < len(src); i++ {
if strings.HasPrefix(src[i:], "/*") {
comment = true
i++
continue
}
if comment && strings.HasPrefix(src[i:], "*/") {
comment = false
i++
continue
}
if comment {
continue
}
c := src[i]
if c == ' ' || c == '\n' {
space = true
continue
}
if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
space = false
}
if c == '{' {
space = false
}
if space {
dst[j] = ' '
j++
space = false
}
dst[j] = c
j++
}
if space {
dst[j] = ' '
j++
}
return string(dst[0:j])
}
var compactText = compact(text)
func TestCompactText(t *testing.T) {
s := proto.CompactTextString(newTestMessage())
if s != compactText {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
}
}
func TestStringEscaping(t *testing.T) {
testCases := []struct {
in *pb.Strings
out string
}{
{
// Test data from C++ test (TextFormatTest.StringEscape).
// Single divergence: we don't escape apostrophes.
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
},
{
// Test data from the same C++ test.
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
},
{
// Some UTF-8.
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
`string_field: "\000\001\377\201"` + "\n",
},
}
for i, tc := range testCases {
var buf bytes.Buffer
if err := proto.MarshalText(&buf, tc.in); err != nil {
t.Errorf("proto.MarsalText: %v", err)
continue
}
s := buf.String()
if s != tc.out {
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
continue
}
// Check round-trip.
pb := new(pb.Strings)
if err := proto.UnmarshalText(s, pb); err != nil {
t.Errorf("#%d: UnmarshalText: %v", i, err)
continue
}
if !proto.Equal(pb, tc.in) {
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
}
}
}
// A limitedWriter accepts some output before it fails.
// This is a proxy for something like a nearly-full or imminently-failing disk,
// or a network connection that is about to die.
type limitedWriter struct {
b bytes.Buffer
limit int
}
var outOfSpace = errors.New("proto: insufficient space")
func (w *limitedWriter) Write(p []byte) (n int, err error) {
var avail = w.limit - w.b.Len()
if avail <= 0 {
return 0, outOfSpace
}
if len(p) <= avail {
return w.b.Write(p)
}
n, _ = w.b.Write(p[:avail])
return n, outOfSpace
}
func TestMarshalTextFailing(t *testing.T) {
// Try lots of different sizes to exercise more error code-paths.
for lim := 0; lim < len(text); lim++ {
buf := new(limitedWriter)
buf.limit = lim
err := proto.MarshalText(buf, newTestMessage())
// We expect a certain error, but also some partial results in the buffer.
if err != outOfSpace {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
}
s := buf.b.String()
x := text[:buf.limit]
if s != x {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
}
}
}
func TestFloats(t *testing.T) {
tests := []struct {
f float64
want string
}{
{0, "0"},
{4.7, "4.7"},
{math.Inf(1), "inf"},
{math.Inf(-1), "-inf"},
{math.NaN(), "nan"},
}
for _, test := range tests {
msg := &pb.FloatingPoint{F: &test.f}
got := strings.TrimSpace(msg.String())
want := `f:` + test.want
if got != want {
t.Errorf("f=%f: got %q, want %q", test.f, got, want)
}
}
}
func TestRepeatedNilText(t *testing.T) {
m := &pb.MessageList{
Message: []*pb.MessageList_Message{
nil,
&pb.MessageList_Message{
Name: proto.String("Horse"),
},
nil,
},
}
want := `Message <nil>
Message {
name: "Horse"
}
Message <nil>
`
if s := proto.MarshalTextString(m); s != want {
t.Errorf(" got: %s\nwant: %s", s, want)
}
}
func TestProto3Text(t *testing.T) {
tests := []struct {
m proto.Message
want string
}{
// zero message
{&proto3pb.Message{}, ``},
// zero message except for an empty byte slice
{&proto3pb.Message{Data: []byte{}}, ``},
// trivial case
{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
// empty map
{&pb.MessageWithMap{}, ``},
// non-empty map; current map format is the same as a repeated struct
{
&pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
`name_mapping:<key:1234 value:"Feist" >`,
},
}
for _, test := range tests {
got := strings.TrimSpace(test.m.String())
if got != test.want {
t.Errorf("\n got %s\nwant %s", got, test.want)
}
}
}

View File

@ -64,16 +64,19 @@ type Context interface {
//
// Done is provided for use in select statements:
//
// // DoSomething calls DoSomethingSlow and returns as soon as
// // it returns or ctx.Done is closed.
// func DoSomething(ctx context.Context) (Result, error) {
// c := make(chan Result, 1)
// go func() { c <- DoSomethingSlow(ctx) }()
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out <-chan Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case res := <-c:
// return res, nil
// case <-ctx.Done():
// return nil, ctx.Err()
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
@ -202,6 +205,9 @@ type CancelFunc func()
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, &c)
@ -262,6 +268,19 @@ func parentCancelCtx(parent Context) (*cancelCtx, bool) {
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
@ -316,13 +335,7 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
c.mu.Unlock()
if removeFromParent {
if p, ok := parentCancelCtx(c.Context); ok {
p.mu.Lock()
if p.children != nil {
delete(p.children, c)
}
p.mu.Unlock()
}
removeChild(c.Context, c)
}
}
@ -333,9 +346,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete.
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
@ -380,7 +392,11 @@ func (c *timerCtx) String() string {
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(removeFromParent, err)
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
@ -391,9 +407,8 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) {
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete:
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
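The doc updates in this file repeat one point: the CancelFunc returned by WithCancel, WithDeadline and WithTimeout should be called as soon as the work is done, so the context's resources (including its entry in the parent's children map) are released. A minimal sketch of that pattern, illustrative only and not part of this commit:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// WithTimeout is shorthand for WithDeadline(parent, time.Now().Add(timeout)).
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	// Always call cancel: it stops the timer and, per the change above,
	// removes this context from its parent's children.
	defer cancel()

	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	}
}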

View File

@ -1,553 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"fmt"
"math/rand"
"runtime"
"strings"
"sync"
"testing"
"time"
)
// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
Context
}
func TestBackground(t *testing.T) {
c := Background()
if c == nil {
t.Fatalf("Background returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.Background"; got != want {
t.Errorf("Background().String() = %q want %q", got, want)
}
}
func TestTODO(t *testing.T) {
c := TODO()
if c == nil {
t.Fatalf("TODO returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.TODO"; got != want {
t.Errorf("TODO().String() = %q want %q", got, want)
}
}
func TestWithCancel(t *testing.T) {
c1, cancel := WithCancel(Background())
if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
t.Errorf("c1.String() = %q want %q", got, want)
}
o := otherContext{c1}
c2, _ := WithCancel(o)
contexts := []Context{c1, o, c2}
for i, c := range contexts {
if d := c.Done(); d == nil {
t.Errorf("c[%d].Done() == %v want non-nil", i, d)
}
if e := c.Err(); e != nil {
t.Errorf("c[%d].Err() == %v want nil", i, e)
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
}
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
for i, c := range contexts {
select {
case <-c.Done():
default:
t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
}
if e := c.Err(); e != Canceled {
t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
}
}
}
func TestParentFinishesChild(t *testing.T) {
// Context tree:
// parent -> cancelChild
// parent -> valueChild -> timerChild
parent, cancel := WithCancel(Background())
cancelChild, stop := WithCancel(parent)
defer stop()
valueChild := WithValue(parent, "key", "value")
timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
defer stop()
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-cancelChild.Done():
t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
case x := <-timerChild.Done():
t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
case x := <-valueChild.Done():
t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
default:
}
// The parent's children should contain the two cancelable children.
pc := parent.(*cancelCtx)
cc := cancelChild.(*cancelCtx)
tc := timerChild.(*timerCtx)
pc.mu.Lock()
if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
t.Errorf("bad linkage: pc.children = %v, want %v and %v",
pc.children, cc, tc)
}
pc.mu.Unlock()
if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
}
if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
}
cancel()
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
}
pc.mu.Unlock()
// parent and children should all be finished.
check := func(ctx Context, name string) {
select {
case <-ctx.Done():
default:
t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
}
if e := ctx.Err(); e != Canceled {
t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
}
}
check(parent, "parent")
check(cancelChild, "cancelChild")
check(valueChild, "valueChild")
check(timerChild, "timerChild")
// WithCancel should return a canceled context on a canceled parent.
precanceledChild := WithValue(parent, "key", "value")
select {
case <-precanceledChild.Done():
default:
t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
}
if e := precanceledChild.Err(); e != Canceled {
t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
}
}
func TestChildFinishesFirst(t *testing.T) {
cancelable, stop := WithCancel(Background())
defer stop()
for _, parent := range []Context{Background(), cancelable} {
child, cancel := WithCancel(parent)
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-child.Done():
t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
default:
}
cc := child.(*cancelCtx)
pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
}
if pcok {
pc.mu.Lock()
if len(pc.children) != 1 || !pc.children[cc] {
t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
}
pc.mu.Unlock()
}
cancel()
if pcok {
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
}
pc.mu.Unlock()
}
// child should be finished.
select {
case <-child.Done():
default:
t.Errorf("<-child.Done() blocked, but shouldn't have")
}
if e := child.Err(); e != Canceled {
t.Errorf("child.Err() == %v want %v", e, Canceled)
}
// parent should not be finished.
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
default:
}
if e := parent.Err(); e != nil {
t.Errorf("parent.Err() == %v want nil", e)
}
}
}
func testDeadline(c Context, wait time.Duration, t *testing.T) {
select {
case <-time.After(wait):
t.Fatalf("context should have timed out")
case <-c.Done():
}
if e := c.Err(); e != DeadlineExceeded {
t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
}
}
func TestDeadline(t *testing.T) {
c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o = otherContext{c}
c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
testDeadline(c, 200*time.Millisecond, t)
}
func TestTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 100*time.Millisecond)
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o = otherContext{c}
c, _ = WithTimeout(o, 300*time.Millisecond)
testDeadline(c, 200*time.Millisecond, t)
}
func TestCanceledTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 200*time.Millisecond)
o := otherContext{c}
c, cancel := WithTimeout(o, 400*time.Millisecond)
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
select {
case <-c.Done():
default:
t.Errorf("<-c.Done() blocked, but shouldn't have")
}
if e := c.Err(); e != Canceled {
t.Errorf("c.Err() == %v want %v", e, Canceled)
}
}
type key1 int
type key2 int
var k1 = key1(1)
var k2 = key2(1) // same int as k1, different type
var k3 = key2(3) // same type as k2, different int
func TestValues(t *testing.T) {
check := func(c Context, nm, v1, v2, v3 string) {
if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
}
if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
}
if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
}
}
c0 := Background()
check(c0, "c0", "", "", "")
c1 := WithValue(Background(), k1, "c1k1")
check(c1, "c1", "c1k1", "", "")
if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
t.Errorf("c.String() = %q want %q", got, want)
}
c2 := WithValue(c1, k2, "c2k2")
check(c2, "c2", "c1k1", "c2k2", "")
c3 := WithValue(c2, k3, "c3k3")
check(c3, "c2", "c1k1", "c2k2", "c3k3")
c4 := WithValue(c3, k1, nil)
check(c4, "c4", "", "c2k2", "c3k3")
o0 := otherContext{Background()}
check(o0, "o0", "", "", "")
o1 := otherContext{WithValue(Background(), k1, "c1k1")}
check(o1, "o1", "c1k1", "", "")
o2 := WithValue(o1, k2, "o2k2")
check(o2, "o2", "c1k1", "o2k2", "")
o3 := otherContext{c4}
check(o3, "o3", "", "c2k2", "c3k3")
o4 := WithValue(o3, k3, nil)
check(o4, "o4", "", "c2k2", "")
}
func TestAllocs(t *testing.T) {
bg := Background()
for _, test := range []struct {
desc string
f func()
limit float64
gccgoLimit float64
}{
{
desc: "Background()",
f: func() { Background() },
limit: 0,
gccgoLimit: 0,
},
{
desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
f: func() {
c := WithValue(bg, k1, nil)
c.Value(k1)
},
limit: 3,
gccgoLimit: 3,
},
{
desc: "WithTimeout(bg, 15*time.Millisecond)",
f: func() {
c, _ := WithTimeout(bg, 15*time.Millisecond)
<-c.Done()
},
limit: 8,
gccgoLimit: 13,
},
{
desc: "WithCancel(bg)",
f: func() {
c, cancel := WithCancel(bg)
cancel()
<-c.Done()
},
limit: 5,
gccgoLimit: 8,
},
{
desc: "WithTimeout(bg, 100*time.Millisecond)",
f: func() {
c, cancel := WithTimeout(bg, 100*time.Millisecond)
cancel()
<-c.Done()
},
limit: 8,
gccgoLimit: 25,
},
} {
limit := test.limit
if runtime.Compiler == "gccgo" {
// gccgo does not yet do escape analysis.
// TODO(iant): Remove this when gccgo does do escape analysis.
limit = test.gccgoLimit
}
if n := testing.AllocsPerRun(100, test.f); n > limit {
t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
}
}
}
func TestSimultaneousCancels(t *testing.T) {
root, cancel := WithCancel(Background())
m := map[Context]CancelFunc{root: cancel}
q := []Context{root}
// Create a tree of contexts.
for len(q) != 0 && len(m) < 100 {
parent := q[0]
q = q[1:]
for i := 0; i < 4; i++ {
ctx, cancel := WithCancel(parent)
m[ctx] = cancel
q = append(q, ctx)
}
}
// Start all the cancels in a random order.
var wg sync.WaitGroup
wg.Add(len(m))
for _, cancel := range m {
go func(cancel CancelFunc) {
cancel()
wg.Done()
}(cancel)
}
// Wait on all the contexts in a random order.
for ctx := range m {
select {
case <-ctx.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
}
}
// Wait for all the cancel functions to return.
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
}
}
func TestInterlockedCancels(t *testing.T) {
parent, cancelParent := WithCancel(Background())
child, cancelChild := WithCancel(parent)
go func() {
parent.Done()
cancelChild()
}()
cancelParent()
select {
case <-child.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
}
}
func TestLayersCancel(t *testing.T) {
testLayers(t, time.Now().UnixNano(), false)
}
func TestLayersTimeout(t *testing.T) {
testLayers(t, time.Now().UnixNano(), true)
}
func testLayers(t *testing.T, seed int64, testTimeout bool) {
rand.Seed(seed)
errorf := func(format string, a ...interface{}) {
t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
}
const (
timeout = 200 * time.Millisecond
minLayers = 30
)
type value int
var (
vals []*value
cancels []CancelFunc
numTimers int
ctx = Background()
)
for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
switch rand.Intn(3) {
case 0:
v := new(value)
ctx = WithValue(ctx, v, v)
vals = append(vals, v)
case 1:
var cancel CancelFunc
ctx, cancel = WithCancel(ctx)
cancels = append(cancels, cancel)
case 2:
var cancel CancelFunc
ctx, cancel = WithTimeout(ctx, timeout)
cancels = append(cancels, cancel)
numTimers++
}
}
checkValues := func(when string) {
for _, key := range vals {
if val := ctx.Value(key).(*value); key != val {
errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
}
}
}
select {
case <-ctx.Done():
errorf("ctx should not be canceled yet")
default:
}
if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
}
t.Log(ctx)
checkValues("before cancel")
if testTimeout {
select {
case <-ctx.Done():
case <-time.After(timeout + timeout/10):
errorf("ctx should have timed out")
}
checkValues("after timeout")
} else {
cancel := cancels[rand.Intn(len(cancels))]
cancel()
select {
case <-ctx.Done():
default:
errorf("ctx should be canceled")
}
checkValues("after cancel")
}
}

View File

@ -0,0 +1,18 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package ctxhttp
import "net/http"
func canceler(client *http.Client, req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View File

@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package ctxhttp
import "net/http"
type requestCanceler interface {
CancelRequest(*http.Request)
}
func canceler(client *http.Client, req *http.Request) func() {
rc, ok := client.Transport.(requestCanceler)
if !ok {
return func() {}
}
return func() {
rc.CancelRequest(req)
}
}

View File

@ -0,0 +1,79 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp
import (
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
cancel := canceler(client, req)
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
go func() {
resp, err := client.Do(req)
result <- responseAndError{resp, err}
}()
select {
case <-ctx.Done():
cancel()
return nil, ctx.Err()
case r := <-result:
return r.resp, r.err
}
}
// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
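A brief sketch of how these helpers are meant to be called, assuming the conventional import path golang.org/x/net/context/ctxhttp and a placeholder URL; illustrative only, not part of this commit:

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Bound the whole request with a two-second timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// A nil client means http.DefaultClient; Get builds the request and calls Do.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err) // ctx.Err() if the timeout fired first
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("read %d bytes\n", len(body))
}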

View File

@ -1,26 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context_test
import (
"fmt"
"time"
"golang.org/x/net/context"
)
func ExampleWithTimeout() {
// Pass a context with a timeout to tell a blocking function that it
// should abandon its work after the timeout elapses.
ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
select {
case <-time.After(200 * time.Millisecond):
fmt.Println("overslept")
case <-ctx.Done():
fmt.Println(ctx.Err()) // prints "context deadline exceeded"
}
// Output:
// context deadline exceeded
}

View File

@ -0,0 +1,2 @@
*~
h2i/h2i

View File

@ -10,8 +10,11 @@ Status:
* The client work has just started but shares a lot of code
and is coming along much quicker.
Docs are at https://godoc.org/github.com/bradfitz/http2
Docs are at https://godoc.org/golang.org/x/net/http2
Demo test server at https://http2.golang.org/
Help & bug reports welcome.
Help & bug reports welcome!
Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+

View File

@ -20,6 +20,7 @@ type buffer struct {
var (
errReadEmpty = errors.New("read from empty buffer")
errWriteClosed = errors.New("write on closed buffer")
errWriteFull = errors.New("write on full buffer")
)
@ -45,7 +46,7 @@ func (b *buffer) Len() int {
// It is an error to write more data than the buffer can hold.
func (b *buffer) Write(p []byte) (n int, err error) {
if b.closed {
return 0, errors.New("closed")
return 0, errWriteClosed
}
// Slide existing data to beginning.

View File

@ -85,8 +85,8 @@ const (
// Continuation Frame
FlagContinuationEndHeaders Flags = 0x4
FlagPushPromiseEndHeaders = 0x4
FlagPushPromisePadded = 0x8
FlagPushPromiseEndHeaders Flags = 0x4
FlagPushPromisePadded Flags = 0x8
)
var flagName = map[FrameType]map[Flags]string{

View File

@ -14,16 +14,20 @@ import (
"bytes"
"errors"
"fmt"
"os"
"runtime"
"strconv"
"sync"
)
var DebugGoroutines = false
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
if !DebugGoroutines {
return 0
}
return goroutineLock(curGoroutineID())
}

View File

@ -31,7 +31,7 @@ import (
"camlistore.org/pkg/googlestorage"
"camlistore.org/pkg/singleflight"
"github.com/bradfitz/http2"
"golang.org/x/net/http2"
)
var (
@ -51,7 +51,7 @@ func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
<li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
<li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
</ul>
<p>See code & instructions for connecting at <a href="https://github.com/bradfitz/http2">https://github.com/bradfitz/http2</a>.</p>
<p>See code & instructions for connecting at <a href="https://github.com/golang/net/tree/master/http2">https://github.com/golang/net/tree/master/http2</a>.</p>
</body></html>`)
}
@ -73,12 +73,12 @@ href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
<p> The code is currently at <a
href="https://github.com/bradfitz/http2">github.com/bradfitz/http2</a>
href="https://golang.org/x/net/http2">golang.org/x/net/http2</a>
but will move to the Go standard library at some point in the future
(enabled by default, without users needing to change their code).</p>
<p>Contact info: <i>bradfitz@golang.org</i>, or <a
href="https://github.com/bradfitz/http2/issues">file a bug</a>.</p>
href="https://golang.org/issues">file a bug</a>.</p>
<h2>Handlers for testing</h2>
<ul>
@ -287,7 +287,7 @@ func newGopherTilesHandler() http.Handler {
return
}
}
io.WriteString(w, "<html><body>")
io.WriteString(w, "<html><body onload='showtimes()'>")
fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
for _, ms := range []int{0, 30, 200, 1000} {
d := time.Duration(ms) * nanosPerMilli
@ -305,7 +305,16 @@ func newGopherTilesHandler() http.Handler {
}
io.WriteString(w, "<br/>\n")
}
io.WriteString(w, "<hr><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a></body></html>")
io.WriteString(w, `<p><div id='loadtimes'></div></p>
<script>
function showtimes() {
var times = 'Times from connection start:<br>'
times += 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.connectStart) + 'ms<br>'
times += 'DOM complete (images loaded): ' + (window.performance.timing.domComplete - window.performance.timing.connectStart) + 'ms<br>'
document.getElementById('loadtimes').innerHTML = times
}
</script>
<hr><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a></body></html>`)
})
}

View File

@ -20,8 +20,9 @@ import (
"strings"
"time"
"code.google.com/p/goauth2/oauth"
compute "code.google.com/p/google-api-go-client/compute/v1"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
)
var (
@ -44,19 +45,18 @@ func readFile(v string) string {
return strings.TrimSpace(string(slurp))
}
var config = &oauth.Config{
var config = &oauth2.Config{
// The client-id and secret should be for an "Installed Application" when using
// the CLI. Later we'll use a web application with a callback.
ClientId: readFile("client-id.dat"),
ClientID: readFile("client-id.dat"),
ClientSecret: readFile("client-secret.dat"),
Scope: strings.Join([]string{
Endpoint: google.Endpoint,
Scopes: []string{
compute.DevstorageFull_controlScope,
compute.ComputeScope,
"https://www.googleapis.com/auth/sqlservice",
"https://www.googleapis.com/auth/sqlservice.admin",
}, " "),
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
},
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
}
@ -88,31 +88,32 @@ func main() {
prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
tr := &oauth.Transport{
Config: config,
}
tokenCache := oauth.CacheFile("token.dat")
token, err := tokenCache.Token()
const tokenFileName = "token.dat"
tokenFile := tokenCacheFile(tokenFileName)
tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
token, err := tokenSource.Token()
if err != nil {
if *writeObject != "" {
log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
}
log.Printf("Error getting token from %s: %v", string(tokenCache), err)
log.Printf("Error getting token from %s: %v", tokenFileName, err)
log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
fmt.Print("\nEnter auth code: ")
sc := bufio.NewScanner(os.Stdin)
sc.Scan()
authCode := strings.TrimSpace(sc.Text())
token, err = tr.Exchange(authCode)
token, err = config.Exchange(oauth2.NoContext, authCode)
if err != nil {
log.Fatalf("Error exchanging auth code for a token: %v", err)
}
tokenCache.PutToken(token)
if err := tokenFile.WriteToken(token); err != nil {
log.Fatalf("Error writing to %s: %v", tokenFileName, err)
}
tokenSource = oauth2.ReuseTokenSource(token, nil)
}
tr.Token = token
oauthClient := &http.Client{Transport: tr}
oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
if *writeObject != "" {
writeCloudStorageObject(oauthClient)
return
@ -277,3 +278,25 @@ func writeCloudStorageObject(httpClient *http.Client) {
log.Printf("Success.")
os.Exit(0)
}
type tokenCacheFile string
func (f tokenCacheFile) Token() (*oauth2.Token, error) {
slurp, err := ioutil.ReadFile(string(f))
if err != nil {
return nil, err
}
t := new(oauth2.Token)
if err := json.Unmarshal(slurp, t); err != nil {
return nil, err
}
return t, nil
}
func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
jt, err := json.Marshal(t)
if err != nil {
return err
}
return ioutil.WriteFile(string(f), jt, 0600)
}

View File

@ -0,0 +1,97 @@
# h2i
**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
days of telnetting to your HTTP/1.n servers? We're bringing you
back.
Features:
- send raw HTTP/2 frames
- PING
- SETTINGS
- HEADERS
- etc
- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
- tab completion of commands, options
Not yet features, but soon:
- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
- request bodies (DATA frames)
- send invalid frames for testing server implementations (supported by underlying Framer)
Later:
- act like a server
## Installation
```
$ go get golang.org/x/net/http2/h2i
$ h2i <host>
```
## Demo
```
$ h2i
Usage: h2i <hostname>
-insecure
Whether to skip TLS cert validation
-nextproto string
Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
$ h2i google.com
Connecting to google.com:443 ...
Connected to 74.125.224.41:443
Negotiated protocol "h2-14"
[FrameHeader SETTINGS len=18]
[MAX_CONCURRENT_STREAMS = 100]
[INITIAL_WINDOW_SIZE = 1048576]
[MAX_FRAME_SIZE = 16384]
[FrameHeader WINDOW_UPDATE len=4]
Window-Increment = 983041
h2i> PING h2iSayHI
[FrameHeader PING flags=ACK len=8]
Data = "h2iSayHI"
h2i> headers
(as HTTP/1.1)> GET / HTTP/1.1
(as HTTP/1.1)> Host: ip.appspot.com
(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
(as HTTP/1.1)>
Opening Stream-ID 1:
:authority = ip.appspot.com
:method = GET
:path = /
:scheme = https
user-agent = h2i/brad-n-blake
[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
:status = "200"
alternate-protocol = "443:quic,p=1"
content-length = "15"
content-type = "text/html"
date = "Fri, 01 May 2015 23:06:56 GMT"
server = "Google Frontend"
[FrameHeader DATA flags=END_STREAM stream=1 len=15]
"173.164.155.78\n"
[FrameHeader PING len=8]
Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
h2i> ping
[FrameHeader PING flags=ACK len=8]
Data = "h2i_ping"
h2i> ping
[FrameHeader PING flags=ACK len=8]
Data = "h2i_ping"
h2i> ping
[FrameHeader GOAWAY len=22]
Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
ReadFrame: EOF
```
## Status
Quick few hour hack. So much yet to do. Feel free to file issues for
bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
and I aren't yet accepting pull requests until things settle down.

489
Godeps/_workspace/src/golang.org/x/net/http2/h2i/h2i.go generated vendored Normal file
View File

@ -0,0 +1,489 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
/*
The h2i command is an interactive HTTP/2 console.
Usage:
$ h2i [flags] <hostname>
Interactive commands in the console: (all parts case-insensitive)
ping [data]
settings ack
settings FOO=n BAR=z
headers (open a new stream by typing HTTP/1.1)
*/
package main
import (
"bufio"
"bytes"
"crypto/tls"
"errors"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
)
// Flags
var (
flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
)
type command struct {
run func(*h2i, []string) error // required
// complete optionally specifies tokens (case-insensitive) which are
// valid for this subcommand.
complete func() []string
}
var commands = map[string]command{
"ping": command{run: (*h2i).cmdPing},
"settings": command{
run: (*h2i).cmdSettings,
complete: func() []string {
return []string{
"ACK",
http2.SettingHeaderTableSize.String(),
http2.SettingEnablePush.String(),
http2.SettingMaxConcurrentStreams.String(),
http2.SettingInitialWindowSize.String(),
http2.SettingMaxFrameSize.String(),
http2.SettingMaxHeaderListSize.String(),
}
},
},
"quit": command{run: (*h2i).cmdQuit},
"headers": command{run: (*h2i).cmdHeaders},
}
func usage() {
fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
flag.PrintDefaults()
os.Exit(1)
}
// withPort adds ":443" if another port isn't already present.
func withPort(host string) string {
if _, _, err := net.SplitHostPort(host); err != nil {
return net.JoinHostPort(host, "443")
}
return host
}
// h2i is the app's state.
type h2i struct {
host string
tc *tls.Conn
framer *http2.Framer
term *terminal.Terminal
// owned by the command loop:
streamID uint32
hbuf bytes.Buffer
henc *hpack.Encoder
// owned by the readFrames loop:
peerSetting map[http2.SettingID]uint32
hdec *hpack.Decoder
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() != 1 {
usage()
}
log.SetFlags(0)
host := flag.Arg(0)
app := &h2i{
host: host,
peerSetting: make(map[http2.SettingID]uint32),
}
app.henc = hpack.NewEncoder(&app.hbuf)
if err := app.Main(); err != nil {
if app.term != nil {
app.logf("%v\n", err)
} else {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
os.Exit(1)
}
fmt.Fprintf(os.Stdout, "\n")
}
func (app *h2i) Main() error {
cfg := &tls.Config{
ServerName: app.host,
NextProtos: strings.Split(*flagNextProto, ","),
InsecureSkipVerify: *flagInsecure,
}
hostAndPort := withPort(app.host)
log.Printf("Connecting to %s ...", hostAndPort)
tc, err := tls.Dial("tcp", hostAndPort, cfg)
if err != nil {
return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err)
}
log.Printf("Connected to %v", tc.RemoteAddr())
defer tc.Close()
if err := tc.Handshake(); err != nil {
return fmt.Errorf("TLS handshake: %v", err)
}
if !*flagInsecure {
if err := tc.VerifyHostname(app.host); err != nil {
return fmt.Errorf("VerifyHostname: %v", err)
}
}
state := tc.ConnectionState()
log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
return fmt.Errorf("Could not negotiate protocol mutually")
}
if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
return err
}
app.framer = http2.NewFramer(tc, tc)
oldState, err := terminal.MakeRaw(0)
if err != nil {
return err
}
defer terminal.Restore(0, oldState)
var screen = struct {
io.Reader
io.Writer
}{os.Stdin, os.Stdout}
app.term = terminal.NewTerminal(screen, "h2i> ")
lastWord := regexp.MustCompile(`.+\W(\w+)$`)
app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
if key != '\t' {
return
}
if pos != len(line) {
// TODO: we're being lazy for now, only supporting tab completion at the end.
return
}
// Auto-complete for the command itself.
if !strings.Contains(line, " ") {
var name string
name, _, ok = lookupCommand(line)
if !ok {
return
}
return name, len(name), true
}
_, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
if !ok || c.complete == nil {
return
}
if strings.HasSuffix(line, " ") {
app.logf("%s", strings.Join(c.complete(), " "))
return line, pos, true
}
m := lastWord.FindStringSubmatch(line)
if m == nil {
return line, len(line), true
}
soFar := m[1]
var match []string
for _, cand := range c.complete() {
if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
continue
}
match = append(match, cand)
}
if len(match) == 0 {
return
}
if len(match) > 1 {
// TODO: auto-complete any common prefix
app.logf("%s", strings.Join(match, " "))
return line, pos, true
}
newLine = line[:len(line)-len(soFar)] + match[0]
return newLine, len(newLine), true
}
errc := make(chan error, 2)
go func() { errc <- app.readFrames() }()
go func() { errc <- app.readConsole() }()
return <-errc
}
func (app *h2i) logf(format string, args ...interface{}) {
fmt.Fprintf(app.term, format+"\n", args...)
}
func (app *h2i) readConsole() error {
for {
line, err := app.term.ReadLine()
if err == io.EOF {
return nil
}
if err != nil {
return fmt.Errorf("terminal.ReadLine: %v", err)
}
f := strings.Fields(line)
if len(f) == 0 {
continue
}
cmd, args := f[0], f[1:]
if _, c, ok := lookupCommand(cmd); ok {
err = c.run(app, args)
} else {
app.logf("Unknown command %q", line)
}
if err == errExitApp {
return nil
}
if err != nil {
return err
}
}
}
func lookupCommand(prefix string) (name string, c command, ok bool) {
prefix = strings.ToLower(prefix)
if c, ok = commands[prefix]; ok {
return prefix, c, ok
}
for full, candidate := range commands {
if strings.HasPrefix(full, prefix) {
if c.run != nil {
return "", command{}, false // ambiguous
}
c = candidate
name = full
}
}
return name, c, c.run != nil
}
var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
func (a *h2i) cmdQuit(args []string) error {
if len(args) > 0 {
a.logf("the QUIT command takes no argument")
return nil
}
return errExitApp
}
func (a *h2i) cmdSettings(args []string) error {
if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
return a.framer.WriteSettingsAck()
}
var settings []http2.Setting
for _, arg := range args {
if strings.EqualFold(arg, "ACK") {
a.logf("Error: ACK must be only argument with the SETTINGS command")
return nil
}
eq := strings.Index(arg, "=")
if eq == -1 {
a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
return nil
}
sid, ok := settingByName(arg[:eq])
if !ok {
a.logf("Error: unknown setting name %q", arg[:eq])
return nil
}
val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
if err != nil {
a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
return nil
}
settings = append(settings, http2.Setting{
ID: sid,
Val: uint32(val),
})
}
a.logf("Sending: %v", settings)
return a.framer.WriteSettings(settings...)
}
func settingByName(name string) (http2.SettingID, bool) {
for _, sid := range [...]http2.SettingID{
http2.SettingHeaderTableSize,
http2.SettingEnablePush,
http2.SettingMaxConcurrentStreams,
http2.SettingInitialWindowSize,
http2.SettingMaxFrameSize,
http2.SettingMaxHeaderListSize,
} {
if strings.EqualFold(sid.String(), name) {
return sid, true
}
}
return 0, false
}
func (app *h2i) cmdPing(args []string) error {
if len(args) > 1 {
app.logf("invalid PING usage: only accepts 0 or 1 args")
return nil // nil means don't end the program
}
var data [8]byte
if len(args) == 1 {
copy(data[:], args[0])
} else {
copy(data[:], "h2i_ping")
}
return app.framer.WritePing(false, data)
}
func (app *h2i) cmdHeaders(args []string) error {
if len(args) > 0 {
app.logf("Error: HEADERS doesn't yet take arguments.")
// TODO: flags for restricting window size, to force CONTINUATION
// frames.
return nil
}
var h1req bytes.Buffer
app.term.SetPrompt("(as HTTP/1.1)> ")
defer app.term.SetPrompt("h2i> ")
for {
line, err := app.term.ReadLine()
if err != nil {
return err
}
h1req.WriteString(line)
h1req.WriteString("\r\n")
if line == "" {
break
}
}
req, err := http.ReadRequest(bufio.NewReader(&h1req))
if err != nil {
app.logf("Invalid HTTP/1.1 request: %v", err)
return nil
}
if app.streamID == 0 {
app.streamID = 1
} else {
app.streamID += 2
}
app.logf("Opening Stream-ID %d:", app.streamID)
hbf := app.encodeHeaders(req)
if len(hbf) > 16<<10 {
app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
return nil
}
return app.framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: app.streamID,
BlockFragment: hbf,
EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
EndHeaders: true, // for now
})
}
func (app *h2i) readFrames() error {
for {
f, err := app.framer.ReadFrame()
if err != nil {
return fmt.Errorf("ReadFrame: %v", err)
}
app.logf("%v", f)
switch f := f.(type) {
case *http2.PingFrame:
app.logf(" Data = %q", f.Data)
case *http2.SettingsFrame:
f.ForeachSetting(func(s http2.Setting) error {
app.logf(" %v", s)
app.peerSetting[s.ID] = s.Val
return nil
})
case *http2.WindowUpdateFrame:
app.logf(" Window-Increment = %v\n", f.Increment)
case *http2.GoAwayFrame:
app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode)
case *http2.DataFrame:
app.logf(" %q", f.Data())
case *http2.HeadersFrame:
if f.HasPriority() {
app.logf(" PRIORITY = %v", f.Priority)
}
if app.hdec == nil {
// TODO: if the user uses h2i to send a SETTINGS frame advertising
// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
// and stuff here instead of using the 4k default. But for now:
tableSize := uint32(4 << 10)
app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
}
app.hdec.Write(f.HeaderBlockFragment())
}
}
}
// called from readLoop
func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
if f.Sensitive {
app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value)
return
}
app.logf(" %s = %q", f.Name, f.Value)
}
func (app *h2i) encodeHeaders(req *http.Request) []byte {
app.hbuf.Reset()
// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
host := req.Host
if host == "" {
host = req.URL.Host
}
path := req.URL.Path
if path == "" {
path = "/"
}
app.writeHeader(":authority", host) // probably not right for all sites
app.writeHeader(":method", req.Method)
app.writeHeader(":path", path)
app.writeHeader(":scheme", "https")
for k, vv := range req.Header {
lowKey := strings.ToLower(k)
if lowKey == "host" {
continue
}
for _, v := range vv {
app.writeHeader(lowKey, v)
}
}
return app.hbuf.Bytes()
}
func (app *h2i) writeHeader(name, value string) {
app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
app.logf(" %s = %s", name, value)
}

View File

@ -55,7 +55,7 @@ import (
"sync"
"time"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/http2/hpack"
)
const (
@ -631,6 +631,9 @@ func (sc *serverConn) serve() {
case wm := <-sc.wantWriteFrameCh:
sc.writeFrame(wm)
case <-sc.wroteFrameCh:
if !sc.writingFrame {
panic("internal error: expected to be already writing a frame")
}
sc.writingFrame = false
sc.scheduleFrameWrite()
case fg, ok := <-sc.readFrameCh:
@ -752,6 +755,7 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
if sc.writingFrame {
panic("internal error: can only be writing one frame at a time")
}
sc.writingFrame = true
st := wm.stream
if st != nil {
@ -768,7 +772,6 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
}
}
sc.writingFrame = true
sc.needsFrameFlush = true
if endsStream(wm.write) {
if st == nil {

View File

@ -19,7 +19,7 @@ import (
"strings"
"sync"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/http2/hpack"
)
type Transport struct {
@ -186,7 +186,7 @@ func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) {
NextProtos: []string{NextProtoTLS},
InsecureSkipVerify: t.InsecureTLSDial,
}
tconn, err := tls.Dial("tcp", host+":"+port, cfg)
tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg)
if err != nil {
return nil, err
}
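The one-line change above replaces string concatenation with net.JoinHostPort, which matters once an IPv6 literal shows up as the host; a tiny illustrative sketch of the difference:

package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "2001:db8::1", "443"
	fmt.Println(host + ":" + port)            // 2001:db8::1:443 — ambiguous for dialers
	fmt.Println(net.JoinHostPort(host, port)) // [2001:db8::1]:443 — what tls.Dial expects
}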

View File

@ -13,7 +13,7 @@ import (
"net/http"
"time"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/http2/hpack"
)
// writeFramer is implemented by any type that is used to write frames.

View File

@ -0,0 +1,525 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package timeseries implements a time series structure for stats collection.
package timeseries
import (
"fmt"
"log"
"time"
)
const (
timeSeriesNumBuckets = 64
minuteHourSeriesNumBuckets = 60
)
var timeSeriesResolutions = []time.Duration{
1 * time.Second,
10 * time.Second,
1 * time.Minute,
10 * time.Minute,
1 * time.Hour,
6 * time.Hour,
24 * time.Hour, // 1 day
7 * 24 * time.Hour, // 1 week
4 * 7 * 24 * time.Hour, // 4 weeks
16 * 7 * 24 * time.Hour, // 16 weeks
}
var minuteHourSeriesResolutions = []time.Duration{
1 * time.Second,
1 * time.Minute,
}
// An Observable is a kind of data that can be aggregated in a time series.
type Observable interface {
Multiply(ratio float64) // Multiplies the data in self by a given ratio
Add(other Observable) // Adds the data from a different observation to self
Clear() // Clears the observation so it can be reused.
CopyFrom(other Observable) // Copies the contents of a given observation to self
}
// Float attaches the methods of Observable to a float64.
type Float float64
// NewFloat returns a Float.
func NewFloat() Observable {
f := Float(0)
return &f
}
// String returns the float as a string.
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
// Value returns the float's value.
func (f *Float) Value() float64 { return float64(*f) }
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
func (f *Float) Add(other Observable) {
o := other.(*Float)
*f += *o
}
func (f *Float) Clear() { *f = 0 }
func (f *Float) CopyFrom(other Observable) {
o := other.(*Float)
*f = *o
}
// A Clock tells the current time.
type Clock interface {
Time() time.Time
}
type defaultClock int
var defaultClockInstance defaultClock
func (defaultClock) Time() time.Time { return time.Now() }
// Information kept per level. Each level consists of a circular list of
// observations. The start of the level can be derived from end minus
// len(buckets) * size.
type tsLevel struct {
oldest int // index to oldest bucketed Observable
newest int // index to newest bucketed Observable
end time.Time // end timestamp for this level
size time.Duration // duration of the bucketed Observable
buckets []Observable // collections of observations
provider func() Observable // used for creating new Observable
}
func (l *tsLevel) Clear() {
l.oldest = 0
l.newest = len(l.buckets) - 1
l.end = time.Time{}
for i := range l.buckets {
if l.buckets[i] != nil {
l.buckets[i].Clear()
l.buckets[i] = nil
}
}
}
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
l.size = size
l.provider = f
l.buckets = make([]Observable, numBuckets)
}
// Keeps a sequence of levels. Each level is responsible for storing data at
// a given resolution. For example, the first level stores data at a one
// minute resolution while the second level stores data at a one hour
// resolution.
// Each level is represented by a sequence of buckets. Each bucket spans an
// interval equal to the resolution of the level. New observations are added
// to the last bucket.
type timeSeries struct {
provider func() Observable // make more Observable
numBuckets int // number of buckets in each level
levels []*tsLevel // levels of bucketed Observable
lastAdd time.Time // time of last Observable tracked
total Observable // convenient aggregation of all Observable
clock Clock // Clock for getting current time
pending Observable // observations not yet bucketed
pendingTime time.Time // what time are we keeping in pending
dirty bool // if there are pending observations
}
// init initializes a level according to the supplied criteria.
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
ts.provider = f
ts.numBuckets = numBuckets
ts.clock = clock
ts.levels = make([]*tsLevel, len(resolutions))
for i := range resolutions {
if i > 0 && resolutions[i-1] >= resolutions[i] {
log.Print("timeseries: resolutions must be monotonically increasing")
break
}
newLevel := new(tsLevel)
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
ts.levels[i] = newLevel
}
ts.Clear()
}
// Clear removes all observations from the time series.
func (ts *timeSeries) Clear() {
ts.lastAdd = time.Time{}
ts.total = ts.resetObservation(ts.total)
ts.pending = ts.resetObservation(ts.pending)
ts.pendingTime = time.Time{}
ts.dirty = false
for i := range ts.levels {
ts.levels[i].Clear()
}
}
// Add records an observation at the current time.
func (ts *timeSeries) Add(observation Observable) {
ts.AddWithTime(observation, ts.clock.Time())
}
// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
smallBucketDuration := ts.levels[0].size
if t.After(ts.lastAdd) {
ts.lastAdd = t
}
if t.After(ts.pendingTime) {
ts.advance(t)
ts.mergePendingUpdates()
ts.pendingTime = ts.levels[0].end
ts.pending.CopyFrom(observation)
ts.dirty = true
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
// The observation is close enough to go into the pending bucket.
// This compensates for clock skewing and small scheduling delays
// by letting the update stay in the fast path.
ts.pending.Add(observation)
ts.dirty = true
} else {
ts.mergeValue(observation, t)
}
}
// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
for _, level := range ts.levels {
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
if 0 <= index && index < ts.numBuckets {
bucketNumber := (level.oldest + index) % ts.numBuckets
if level.buckets[bucketNumber] == nil {
level.buckets[bucketNumber] = level.provider()
}
level.buckets[bucketNumber].Add(observation)
}
}
ts.total.Add(observation)
}
// mergePendingUpdates applies the pending updates into all levels.
func (ts *timeSeries) mergePendingUpdates() {
if ts.dirty {
ts.mergeValue(ts.pending, ts.pendingTime)
ts.pending = ts.resetObservation(ts.pending)
ts.dirty = false
}
}
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
if !t.After(ts.levels[0].end) {
return
}
for i := 0; i < len(ts.levels); i++ {
level := ts.levels[i]
if !level.end.Before(t) {
break
}
// If the time is sufficiently far, just clear the level and advance
// directly.
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
for _, b := range level.buckets {
ts.resetObservation(b)
}
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
}
for t.After(level.end) {
level.end = level.end.Add(level.size)
level.newest = level.oldest
level.oldest = (level.oldest + 1) % ts.numBuckets
ts.resetObservation(level.buckets[level.newest])
}
t = level.end
}
}
// Latest returns the sum of the num latest buckets from the level.
func (ts *timeSeries) Latest(level, num int) Observable {
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
result := ts.provider()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
if l.buckets[index] != nil {
result.Add(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index--
}
return result
}
// LatestBuckets returns a copy of the num latest buckets from level.
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
if level < 0 || level >= len(ts.levels) {
log.Print("timeseries: bad level argument: ", level)
return nil
}
if num < 0 || num >= ts.numBuckets {
log.Print("timeseries: bad num argument: ", num)
return nil
}
results := make([]Observable, num)
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
result := ts.provider()
results[i] = result
if l.buckets[index] != nil {
result.CopyFrom(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index -= 1
}
return results
}
// ScaleBy updates observations by scaling by factor.
func (ts *timeSeries) ScaleBy(factor float64) {
for _, l := range ts.levels {
for i := 0; i < ts.numBuckets; i++ {
l.buckets[i].Multiply(factor)
}
}
ts.total.Multiply(factor)
ts.pending.Multiply(factor)
}
// Range returns the sum of observations added over the specified time range.
// If start or finish times don't fall on bucket boundaries of the same
// level, then return values are approximate answers.
func (ts *timeSeries) Range(start, finish time.Time) Observable {
return ts.ComputeRange(start, finish, 1)[0]
}
// Recent returns the sum of observations from the last delta.
func (ts *timeSeries) Recent(delta time.Duration) Observable {
now := ts.clock.Time()
return ts.Range(now.Add(-delta), now)
}
// Total returns the total of all observations.
func (ts *timeSeries) Total() Observable {
ts.mergePendingUpdates()
return ts.total
}
// ComputeRange computes a specified number of values into a slice using
// the observations recorded over the specified time period. The return
// values are approximate if the start or finish times don't fall on the
// bucket boundaries at the same level or if the number of buckets spanning
// the range is not an integral multiple of num.
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
if start.After(finish) {
log.Printf("timeseries: start > finish, %v>%v", start, finish)
return nil
}
if num < 0 {
log.Printf("timeseries: num < 0, %v", num)
return nil
}
results := make([]Observable, num)
for _, l := range ts.levels {
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
ts.extract(l, start, finish, num, results)
return results
}
}
// Failed to find a level that covers the desired range. So just
// extract from the last level, even if it doesn't cover the entire
// desired range.
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
return results
}
// RecentList returns the specified number of values in a slice over the most
// recent time period of the specified range.
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
if delta < 0 {
return nil
}
now := ts.clock.Time()
return ts.ComputeRange(now.Add(-delta), now, num)
}
// extract returns a slice of the specified number of observations from a given
// level over a given range.
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
ts.mergePendingUpdates()
srcInterval := l.size
dstInterval := finish.Sub(start) / time.Duration(num)
dstStart := start
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
srcIndex := 0
// Where should scanning start?
if dstStart.After(srcStart) {
advance := dstStart.Sub(srcStart) / srcInterval
srcIndex += int(advance)
srcStart = srcStart.Add(advance * srcInterval)
}
// The i'th value is computed as shown below.
// interval = (finish - start) / num
// i'th value = sum of the observations in the range
// [ start + i * interval,
// start + (i + 1) * interval )
for i := 0; i < num; i++ {
results[i] = ts.resetObservation(results[i])
dstEnd := dstStart.Add(dstInterval)
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
srcEnd := srcStart.Add(srcInterval)
if srcEnd.After(ts.lastAdd) {
srcEnd = ts.lastAdd
}
if !srcEnd.Before(dstStart) {
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
// dst completely contains src.
if srcValue != nil {
results[i].Add(srcValue)
}
} else {
// dst partially overlaps src.
overlapStart := maxTime(srcStart, dstStart)
overlapEnd := minTime(srcEnd, dstEnd)
base := srcEnd.Sub(srcStart)
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
used := ts.provider()
if srcValue != nil {
used.CopyFrom(srcValue)
}
used.Multiply(fraction)
results[i].Add(used)
}
if srcEnd.After(dstEnd) {
break
}
}
srcIndex++
srcStart = srcStart.Add(srcInterval)
}
dstStart = dstStart.Add(dstInterval)
}
}
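// Editor's note, not part of the vendored upstream file: a worked instance of
// the fractional-overlap computation in extract above. With 60s source buckets
// and a requested range split into 90s destination intervals, a source bucket
// whose value is 60 and which overlaps a destination interval for 30s
// contributes 60 * (30/60) = 30 to that interval's result.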
// resetObservation clears the content so the struct may be reused.
func (ts *timeSeries) resetObservation(observation Observable) Observable {
if observation == nil {
observation = ts.provider()
} else {
observation.Clear()
}
return observation
}
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
type TimeSeries struct {
timeSeries
}
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
func NewTimeSeries(f func() Observable) *TimeSeries {
return NewTimeSeriesWithClock(f, defaultClockInstance)
}
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
ts := new(TimeSeries)
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
return ts
}
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
type MinuteHourSeries struct {
timeSeries
}
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
}
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
ts := new(MinuteHourSeries)
ts.timeSeries.init(minuteHourSeriesResolutions, f,
minuteHourSeriesNumBuckets, clock)
return ts
}
func (ts *MinuteHourSeries) Minute() Observable {
return ts.timeSeries.Latest(0, 60)
}
func (ts *MinuteHourSeries) Hour() Observable {
return ts.timeSeries.Latest(1, 60)
}
func minTime(a, b time.Time) time.Time {
if a.Before(b) {
return a
}
return b
}
func maxTime(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}
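// Editor's sketch, not part of the vendored upstream file: a minimal
// Observable that just counts, plus the typical call sequence against the API
// above. It assumes the Add method defined earlier in this file, which records
// an observation at the clock's current time.
type exampleCounter float64

func (c *exampleCounter) Add(other Observable)      { *c += *(other.(*exampleCounter)) }
func (c *exampleCounter) Multiply(ratio float64)    { *c *= exampleCounter(ratio) }
func (c *exampleCounter) Clear()                    { *c = 0 }
func (c *exampleCounter) CopyFrom(other Observable) { *c = *(other.(*exampleCounter)) }

func exampleTimeSeriesUsage() {
	ts := NewTimeSeries(func() Observable { return new(exampleCounter) })

	one := exampleCounter(1)
	ts.Add(&one) // one observation at the clock's current time

	perMinute := ts.Recent(time.Minute).(*exampleCounter) // sum over the last minute
	trend := ts.RecentList(time.Hour, 60)                 // 60 one-minute points over the last hour
	_, _ = perMinute, trend
}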

524
Godeps/_workspace/src/golang.org/x/net/trace/events.go generated vendored Normal file
View File

@ -0,0 +1,524 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/tabwriter"
"time"
)
var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
"elapsed": elapsed,
"trimSpace": strings.TrimSpace,
}).Parse(eventsHTML))
const maxEventsPerLog = 100
type bucket struct {
MaxErrAge time.Duration
String string
}
var buckets = []bucket{
{0, "total"},
{10 * time.Second, "errs<10s"},
{1 * time.Minute, "errs<1m"},
{10 * time.Minute, "errs<10m"},
{1 * time.Hour, "errs<1h"},
{10 * time.Hour, "errs<10h"},
{24000 * time.Hour, "errors"},
}
// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking; see AuthRequest for the default auth check
// used by the handler registered on http.DefaultServeMux.
// req may be nil.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
now := time.Now()
data := &struct {
Families []string // family names
Buckets []bucket
Counts [][]int // eventLog count per family/bucket
// Set when a bucket has been selected.
Family string
Bucket int
EventLogs eventLogs
Expanded bool
}{
Buckets: buckets,
}
data.Families = make([]string, 0, len(families))
famMu.RLock()
for name := range families {
data.Families = append(data.Families, name)
}
famMu.RUnlock()
sort.Strings(data.Families)
// Count the number of eventLogs in each family for each error age.
data.Counts = make([][]int, len(data.Families))
for i, name := range data.Families {
// TODO(sameer): move this loop under the family lock.
f := getEventFamily(name)
data.Counts[i] = make([]int, len(data.Buckets))
for j, b := range data.Buckets {
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
}
}
if req != nil {
var ok bool
data.Family, data.Bucket, ok = parseEventsArgs(req)
if !ok {
// No-op
} else {
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
}
if data.EventLogs != nil {
defer data.EventLogs.Free()
sort.Sort(data.EventLogs)
}
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
data.Expanded = exp
}
}
famMu.RLock()
defer famMu.RUnlock()
if err := eventsTmpl.Execute(w, data); err != nil {
log.Printf("net/trace: Failed executing template: %v", err)
}
}
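// Editor's sketch, not part of the vendored upstream file: serving RenderEvents
// from a handler of one's own instead of the one registered on
// http.DefaultServeMux. The isAdmin callback is hypothetical and stands in for
// whatever authorization check the caller prefers over the default AuthRequest.
func exampleEventsHandler(isAdmin func(*http.Request) bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Only expose potentially sensitive detail (titles, stacks, event text)
		// to authorized viewers.
		RenderEvents(w, req, isAdmin(req))
	})
}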
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
fam, bStr := req.FormValue("fam"), req.FormValue("b")
if fam == "" || bStr == "" {
return "", 0, false
}
b, err := strconv.Atoi(bStr)
if err != nil || b < 0 || b >= len(buckets) {
return "", 0, false
}
return fam, b, true
}
// An EventLog provides a log of events associated with a specific object.
type EventLog interface {
// Printf formats its arguments with fmt.Sprintf and adds the
// result to the event log.
Printf(format string, a ...interface{})
// Errorf is like Printf, but it marks this event as an error.
Errorf(format string, a ...interface{})
// Finish declares that this event log is complete.
// The event log should not be used after calling this method.
Finish()
}
// NewEventLog returns a new EventLog with the specified family name
// and title.
func NewEventLog(family, title string) EventLog {
el := newEventLog()
el.ref()
el.Family, el.Title = family, title
el.Start = time.Now()
el.events = make([]logEntry, 0, maxEventsPerLog)
el.stack = make([]uintptr, 32)
n := runtime.Callers(2, el.stack)
el.stack = el.stack[:n]
getEventFamily(family).add(el)
return el
}
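// Editor's sketch, not part of the vendored upstream file: the intended call
// pattern for NewEventLog and the EventLog interface above. The family and
// title strings are made up.
func exampleEventLogUsage() {
	el := NewEventLog("example.Conn", "demo-host:6379")
	defer el.Finish() // el must not be used after Finish

	el.Printf("connected")                                // ordinary entry
	el.Errorf("get %q: %v", "key", fmt.Errorf("timeout")) // error entry; counted in the errs<... buckets
}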
func (el *eventLog) Finish() {
getEventFamily(el.Family).remove(el)
el.unref() // matches ref in New
}
var (
famMu sync.RWMutex
families = make(map[string]*eventFamily) // family name => family
)
func getEventFamily(fam string) *eventFamily {
famMu.Lock()
defer famMu.Unlock()
f := families[fam]
if f == nil {
f = &eventFamily{}
families[fam] = f
}
return f
}
type eventFamily struct {
mu sync.RWMutex
eventLogs eventLogs
}
func (f *eventFamily) add(el *eventLog) {
f.mu.Lock()
f.eventLogs = append(f.eventLogs, el)
f.mu.Unlock()
}
func (f *eventFamily) remove(el *eventLog) {
f.mu.Lock()
defer f.mu.Unlock()
for i, el0 := range f.eventLogs {
if el == el0 {
copy(f.eventLogs[i:], f.eventLogs[i+1:])
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
return
}
}
}
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
f.mu.RLock()
defer f.mu.RUnlock()
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
n++
}
}
return
}
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
f.mu.RLock()
defer f.mu.RUnlock()
els = make(eventLogs, 0, len(f.eventLogs))
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
el.ref()
els = append(els, el)
}
}
return
}
type eventLogs []*eventLog
// Free calls unref on each element of the list.
func (els eventLogs) Free() {
for _, el := range els {
el.unref()
}
}
// eventLogs may be sorted in reverse chronological order.
func (els eventLogs) Len() int { return len(els) }
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
// A logEntry is a timestamped log entry in an event log.
type logEntry struct {
When time.Time
Elapsed time.Duration // since previous event in log
NewDay bool // whether this event is on a different day to the previous event
What string
IsErr bool
}
// WhenString returns a string representation of the time of the event.
// It will include the date if midnight was crossed.
func (e logEntry) WhenString() string {
if e.NewDay {
return e.When.Format("2006/01/02 15:04:05.000000")
}
return e.When.Format("15:04:05.000000")
}
// An eventLog represents an active event log.
type eventLog struct {
// Family is the top-level grouping of event logs to which this belongs.
Family string
// Title is the title of this event log.
Title string
// Timing information.
Start time.Time
// Call stack where this event log was created.
stack []uintptr
// Append-only sequence of events.
//
// TODO(sameer): change this to a ring buffer to avoid the array copy
// when we hit maxEventsPerLog.
mu sync.RWMutex
events []logEntry
LastErrorTime time.Time
discarded int
refs int32 // how many buckets this is in
}
func (el *eventLog) reset() {
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
el.Family = ""
el.Title = ""
el.Start = time.Time{}
el.stack = nil
el.events = nil
el.LastErrorTime = time.Time{}
el.discarded = 0
el.refs = 0
}
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
if maxErrAge == 0 {
return true
}
el.mu.RLock()
defer el.mu.RUnlock()
return now.Sub(el.LastErrorTime) < maxErrAge
}
// delta returns the elapsed time since the last event or the log start,
// and whether it spans midnight.
// L >= el.mu
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
if len(el.events) == 0 {
return t.Sub(el.Start), false
}
prev := el.events[len(el.events)-1].When
return t.Sub(prev), prev.Day() != t.Day()
}
func (el *eventLog) Printf(format string, a ...interface{}) {
el.printf(false, format, a...)
}
func (el *eventLog) Errorf(format string, a ...interface{}) {
el.printf(true, format, a...)
}
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
el.mu.Lock()
e.Elapsed, e.NewDay = el.delta(e.When)
if len(el.events) < maxEventsPerLog {
el.events = append(el.events, e)
} else {
// Discard the oldest event.
if el.discarded == 0 {
// el.discarded starts at two to count for the event it
// is replacing, plus the next one that we are about to
// drop.
el.discarded = 2
} else {
el.discarded++
}
// TODO(sameer): if this causes allocations on a critical path,
// change eventLog.What to be a fmt.Stringer, as in trace.go.
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
// The timestamp of the discarded meta-event should be
// the time of the last event it is representing.
el.events[0].When = el.events[1].When
copy(el.events[1:], el.events[2:])
el.events[maxEventsPerLog-1] = e
}
if e.IsErr {
el.LastErrorTime = e.When
}
el.mu.Unlock()
}
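// Editor's note, not part of the vendored upstream file: once maxEventsPerLog
// entries are stored, the next printf replaces events[0] with the meta-event
// "(2 events discarded)" (the entry it overwrote plus the one being dropped),
// and each subsequent printf bumps that count by one.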
func (el *eventLog) ref() {
atomic.AddInt32(&el.refs, 1)
}
func (el *eventLog) unref() {
if atomic.AddInt32(&el.refs, -1) == 0 {
freeEventLog(el)
}
}
func (el *eventLog) When() string {
return el.Start.Format("2006/01/02 15:04:05.000000")
}
func (el *eventLog) ElapsedTime() string {
elapsed := time.Since(el.Start)
return fmt.Sprintf("%.6f", elapsed.Seconds())
}
func (el *eventLog) Stack() string {
buf := new(bytes.Buffer)
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
printStackRecord(tw, el.stack)
tw.Flush()
return buf.String()
}
// printStackRecord prints the function + source line information
// for a single stack trace.
// Adapted from runtime/pprof/pprof.go.
func printStackRecord(w io.Writer, stk []uintptr) {
for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
continue
}
file, line := f.FileLine(pc)
name := f.Name()
// Hide runtime.goexit and any runtime functions at the beginning.
if strings.HasPrefix(name, "runtime.") {
continue
}
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
}
}
func (el *eventLog) Events() []logEntry {
el.mu.RLock()
defer el.mu.RUnlock()
return el.events
}
// freeEventLogs is a freelist of *eventLog
var freeEventLogs = make(chan *eventLog, 1000)
// newEventLog returns an event log ready to use.
func newEventLog() *eventLog {
select {
case el := <-freeEventLogs:
return el
default:
return new(eventLog)
}
}
// freeEventLog adds el to freeEventLogs if there's room.
// This is non-blocking.
func freeEventLog(el *eventLog) {
el.reset()
select {
case freeEventLogs <- el:
default:
}
}
const eventsHTML = `
<html>
<head>
<title>events</title>
</head>
<style type="text/css">
body {
font-family: sans-serif;
}
table#req-status td.family {
padding-right: 2em;
}
table#req-status td.active {
padding-right: 1em;
}
table#req-status td.empty {
color: #aaa;
}
table#reqs {
margin-top: 1em;
}
table#reqs tr.first {
{{if $.Expanded}}font-weight: bold;{{end}}
}
table#reqs td {
font-family: monospace;
}
table#reqs td.when {
text-align: right;
white-space: nowrap;
}
table#reqs td.elapsed {
padding: 0 0.5em;
text-align: right;
white-space: pre;
width: 10em;
}
address {
font-size: smaller;
margin-top: 5em;
}
</style>
<body>
<h1>/debug/events</h1>
<table id="req-status">
{{range $i, $fam := .Families}}
<tr>
<td class="family">{{$fam}}</td>
{{range $j, $bucket := $.Buckets}}
{{$n := index $.Counts $i $j}}
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
[{{$n}} {{$bucket.String}}]
{{if $n}}</a>{{end}}
</td>
{{end}}
</tr>{{end}}
</table>
{{if $.EventLogs}}
<hr />
<h3>Family: {{$.Family}}</h3>
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
[Summary]{{if $.Expanded}}</a>{{end}}
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
[Expanded]{{if not $.Expanded}}</a>{{end}}
<table id="reqs">
<tr><th>When</th><th>Elapsed</th></tr>
{{range $el := $.EventLogs}}
<tr class="first">
<td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td>
<td>{{$el.Title}}
</tr>
{{if $.Expanded}}
<tr>
<td class="when"></td>
<td class="elapsed"></td>
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
</tr>
{{range $el.Events}}
<tr>
<td class="when">{{.WhenString}}</td>
<td class="elapsed">{{elapsed .Elapsed}}</td>
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
</tr>
{{end}}
{{end}}
{{end}}
</table>
{{end}}
</body>
</html>
`

View File

@ -0,0 +1,356 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
// This file implements histogramming for RPC statistics collection.
import (
"bytes"
"fmt"
"html/template"
"log"
"math"
"golang.org/x/net/internal/timeseries"
)
const (
bucketCount = 38
)
// histogram keeps counts of values in buckets that are spaced
// out in powers of 2: 0-1, 2-3, 4-7...
// histogram implements timeseries.Observable
type histogram struct {
sum int64 // running total of measurements
sumOfSquares float64 // running total of squared measurements (used to compute variance)
buckets []int64 // bucketed values for histogram
value int // holds a single value as an optimization
valueCount int64 // number of values recorded for single value
}
// addMeasurement records a value measurement observation in the histogram.
func (h *histogram) addMeasurement(value int64) {
// TODO: assert invariant
h.sum += value
h.sumOfSquares += float64(value) * float64(value)
bucketIndex := getBucket(value)
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
h.value = bucketIndex
h.valueCount++
} else {
h.allocateBuckets()
h.buckets[bucketIndex]++
}
}
func (h *histogram) allocateBuckets() {
if h.buckets == nil {
h.buckets = make([]int64, bucketCount)
h.buckets[h.value] = h.valueCount
h.value = 0
h.valueCount = -1
}
}
func log2(i int64) int {
n := 0
for ; i >= 0x100; i >>= 8 {
n += 8
}
for ; i > 0; i >>= 1 {
n++
}
return n
}
func getBucket(i int64) (index int) {
index = log2(i) - 1
if index < 0 {
index = 0
}
if index >= bucketCount {
index = bucketCount - 1
}
return
}
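// Editor's sketch, not part of the vendored upstream file: how log2/getBucket
// above and bucketBoundary below cooperate. A value v lands in bucket
// getBucket(v), which covers the half-open range
// [bucketBoundary(i), bucketBoundary(i+1)).
func exampleBucketing() {
	for _, v := range []int64{0, 1, 3, 5, 100} {
		i := getBucket(v)
		lo, hi := bucketBoundary(uint8(i)), bucketBoundary(uint8(i+1))
		// e.g. 5 -> bucket 2, range [4, 8), matching the 0-1, 2-3, 4-7, ...
		// spacing described at the top of this file.
		fmt.Printf("%d -> bucket %d, range [%d, %d)\n", v, i, lo, hi)
	}
}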
// Total returns the number of recorded observations.
func (h *histogram) total() (total int64) {
if h.valueCount >= 0 {
total = h.valueCount
}
for _, val := range h.buckets {
total += int64(val)
}
return
}
// Average returns the average value of recorded observations.
func (h *histogram) average() float64 {
t := h.total()
if t == 0 {
return 0
}
return float64(h.sum) / float64(t)
}
// Variance returns the variance of recorded observations.
func (h *histogram) variance() float64 {
t := float64(h.total())
if t == 0 {
return 0
}
s := float64(h.sum) / t
return h.sumOfSquares/t - s*s
}
// StandardDeviation returns the standard deviation of recorded observations.
func (h *histogram) standardDeviation() float64 {
return math.Sqrt(h.variance())
}
// PercentileBoundary estimates the value that the given fraction of recorded
// observations are less than.
func (h *histogram) percentileBoundary(percentile float64) int64 {
total := h.total()
// Corner cases (make sure result is strictly less than Total())
if total == 0 {
return 0
} else if total == 1 {
return int64(h.average())
}
percentOfTotal := round(float64(total) * percentile)
var runningTotal int64
for i := range h.buckets {
value := h.buckets[i]
runningTotal += value
if runningTotal == percentOfTotal {
// We hit an exact bucket boundary. If the next bucket has data, it is a
// good estimate of the value. If the bucket is empty, we interpolate the
// midpoint between the next bucket's boundary and the next non-zero
// bucket. If the remaining buckets are all empty, then we use the
// boundary for the next bucket as the estimate.
j := uint8(i + 1)
min := bucketBoundary(j)
if runningTotal < total {
for h.buckets[j] == 0 {
j++
}
}
max := bucketBoundary(j)
return min + round(float64(max-min)/2)
} else if runningTotal > percentOfTotal {
// The value is in this bucket. Interpolate the value.
delta := runningTotal - percentOfTotal
percentBucket := float64(value-delta) / float64(value)
bucketMin := bucketBoundary(uint8(i))
nextBucketMin := bucketBoundary(uint8(i + 1))
bucketSize := nextBucketMin - bucketMin
return bucketMin + round(percentBucket*float64(bucketSize))
}
}
return bucketBoundary(bucketCount - 1)
}
// Median returns the estimated median of the observed values.
func (h *histogram) median() int64 {
return h.percentileBoundary(0.5)
}
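// Editor's sketch, not part of the vendored upstream file: recording a few
// measurements (hypothetical microsecond latencies) and reading the summary
// statistics above. percentileBoundary returns a bucket-based estimate, not an
// exact order statistic.
func exampleHistogramStats() {
	h := new(histogram)
	for _, v := range []int64{120, 340, 560, 780, 15000} {
		h.addMeasurement(v)
	}
	fmt.Printf("n=%d mean=%.1f median~=%d p95~=%d\n",
		h.total(), h.average(), h.median(), h.percentileBoundary(0.95))
}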
// Add adds other to h.
func (h *histogram) Add(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == 0 {
// Other histogram is empty
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
// Both have a single bucketed value, aggregate them
h.valueCount += o.valueCount
} else {
// Two different values necessitate buckets in this histogram
h.allocateBuckets()
if o.valueCount >= 0 {
h.buckets[o.value] += o.valueCount
} else {
for i := range h.buckets {
h.buckets[i] += o.buckets[i]
}
}
}
h.sumOfSquares += o.sumOfSquares
h.sum += o.sum
}
// Clear resets the histogram to an empty state, removing all observed values.
func (h *histogram) Clear() {
h.buckets = nil
h.value = 0
h.valueCount = 0
h.sum = 0
h.sumOfSquares = 0
}
// CopyFrom copies from other, which must be a *histogram, into h.
func (h *histogram) CopyFrom(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == -1 {
h.allocateBuckets()
copy(h.buckets, o.buckets)
}
h.sum = o.sum
h.sumOfSquares = o.sumOfSquares
h.value = o.value
h.valueCount = o.valueCount
}
// Multiply scales the histogram by the specified ratio.
func (h *histogram) Multiply(ratio float64) {
if h.valueCount == -1 {
for i := range h.buckets {
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
}
} else {
h.valueCount = int64(float64(h.valueCount) * ratio)
}
h.sum = int64(float64(h.sum) * ratio)
h.sumOfSquares = h.sumOfSquares * ratio
}
// New creates a new histogram.
func (h *histogram) New() timeseries.Observable {
r := new(histogram)
r.Clear()
return r
}
func (h *histogram) String() string {
return fmt.Sprintf("%d, %f, %d, %d, %v",
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
}
// round returns the closest int64 to the argument
func round(in float64) int64 {
return int64(math.Floor(in + 0.5))
}
// bucketBoundary returns the first value in the bucket.
func bucketBoundary(bucket uint8) int64 {
if bucket == 0 {
return 0
}
return 1 << bucket
}
// bucketData holds data about a specific bucket for use in distTmpl.
type bucketData struct {
Lower, Upper int64
N int64
Pct, CumulativePct float64
GraphWidth int
}
// data holds data about a Distribution for use in distTmpl.
type data struct {
Buckets []*bucketData
Count, Median int64
Mean, StandardDeviation float64
}
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
const maxHTMLBarWidth = 350.0
// newData returns data representing h for use in distTmpl.
func (h *histogram) newData() *data {
// Force the allocation of buckets to simplify the rendering implementation
h.allocateBuckets()
// We scale the bars on the right so that the largest bar is
// maxHTMLBarWidth pixels in width.
maxBucket := int64(0)
for _, n := range h.buckets {
if n > maxBucket {
maxBucket = n
}
}
total := h.total()
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
var pctMult float64
if total == 0 {
pctMult = 1.0
} else {
pctMult = 100.0 / float64(total)
}
buckets := make([]*bucketData, len(h.buckets))
runningTotal := int64(0)
for i, n := range h.buckets {
if n == 0 {
continue
}
runningTotal += n
var upperBound int64
if i < bucketCount-1 {
upperBound = bucketBoundary(uint8(i + 1))
} else {
upperBound = math.MaxInt64
}
buckets[i] = &bucketData{
Lower: bucketBoundary(uint8(i)),
Upper: upperBound,
N: n,
Pct: float64(n) * pctMult,
CumulativePct: float64(runningTotal) * pctMult,
GraphWidth: int(float64(n) * barsizeMult),
}
}
return &data{
Buckets: buckets,
Count: total,
Median: h.median(),
Mean: h.average(),
StandardDeviation: h.standardDeviation(),
}
}
func (h *histogram) html() template.HTML {
buf := new(bytes.Buffer)
if err := distTmpl.Execute(buf, h.newData()); err != nil {
buf.Reset()
log.Printf("net/trace: couldn't execute template: %v", err)
}
return template.HTML(buf.String())
}
// Input: data
var distTmpl = template.Must(template.New("distTmpl").Parse(`
<table>
<tr>
<td style="padding:0.25em">Count: {{.Count}}</td>
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
<td style="padding:0.25em">Median: {{.Median}}</td>
</tr>
</table>
<hr>
<table>
{{range $b := .Buckets}}
{{if $b}}
<tr>
<td style="padding:0 0 0 0.25em">[</td>
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
</tr>
{{end}}
{{end}}
</table>
`))

1057
Godeps/_workspace/src/golang.org/x/net/trace/trace.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -1,14 +0,0 @@
language: go
go:
- 1.3
- 1.4
install:
- export GOPATH="$HOME/gopath"
- mkdir -p "$GOPATH/src/golang.org/x"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- go get -v -t -d golang.org/x/oauth2/...
script:
- go test -v golang.org/x/oauth2/...

View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,31 +0,0 @@
# Contributing to Go
Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
## Filing issues
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
## Contributing code
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The oauth2 Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,64 +0,0 @@
# OAuth2 for Go
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
The oauth2 package contains a client implementation for the OAuth 2.0 spec.
## Installation
~~~~
go get golang.org/x/oauth2
~~~~
See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
## App Engine
In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
the `golang.org/x/net/context` package.
This means it's no longer possible to use the "Classic App Engine"
`appengine.Context` type with the `oauth2` package. (You're using
Classic App Engine if you import the package `"appengine"`.)
To work around this, you may use the new `"google.golang.org/appengine"`
package. This package has almost the same API as the `"appengine"` package,
but it can be fetched with `go get` and used on "Managed VMs" and well as
Classic App Engine.
See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
for information on updating your app.
If you don't want to update your entire app to use the new App Engine packages,
you may use both sets of packages in parallel, using only the new packages
with the `oauth2` package.
import (
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
newappengine "google.golang.org/appengine"
newurlftech "google.golang.org/urlfetch"
"appengine"
)
func handler(w http.ResponseWriter, r *http.Request) {
var c appengine.Context = appengine.NewContext(r)
c.Infof("Logging a message with the old package")
var ctx context.Context = newappengine.NewContext(r)
client := &http.Client{
Transport: &oauth2.Transport{
Source: google.AppEngineTokenSource(ctx, "scope"),
Base: &newurlfetch.Transport{Context: ctx},
},
}
client.Get("...")
}

View File

@ -1,24 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine appenginevm
// App Engine hooks.
package oauth2
import (
"net/http"
"golang.org/x/net/context"
"google.golang.org/appengine/urlfetch"
)
func init() {
registerContextClientFunc(contextClientAppEngine)
}
func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
return urlfetch.Client(ctx), nil
}

View File

@ -1,45 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2_test
import (
"fmt"
"log"
"golang.org/x/oauth2"
)
func ExampleConfig() {
conf := &oauth2.Config{
ClientID: "YOUR_CLIENT_ID",
ClientSecret: "YOUR_CLIENT_SECRET",
Scopes: []string{"SCOPE1", "SCOPE2"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://provider.com/o/oauth2/auth",
TokenURL: "https://provider.com/o/oauth2/token",
},
}
// Redirect user to consent page to ask for permission
// for the scopes specified above.
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
fmt.Printf("Visit the URL for the auth dialog: %v", url)
// Use the authorization code that is pushed to the redirect URL.
// Exchange will do the handshake to retrieve the initial access
// token. The HTTP Client returned by conf.Client will refresh the
// token as necessary.
var code string
if _, err := fmt.Scan(&code); err != nil {
log.Fatal(err)
}
tok, err := conf.Exchange(oauth2.NoContext, code)
if err != nil {
log.Fatal(err)
}
client := conf.Client(oauth2.NoContext, tok)
client.Get("...")
}

View File

@ -1,16 +0,0 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package facebook provides constants for using OAuth2 to access Facebook.
package facebook
import (
"golang.org/x/oauth2"
)
// Endpoint is Facebook's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.facebook.com/dialog/oauth",
TokenURL: "https://graph.facebook.com/oauth/access_token",
}

View File

@ -1,16 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package github provides constants for using OAuth2 to access Github.
package github
import (
"golang.org/x/oauth2"
)
// Endpoint is Github's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://github.com/login/oauth/authorize",
TokenURL: "https://github.com/login/oauth/access_token",
}

View File

@ -1,83 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"sort"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead.
//
// The provided context must have come from appengine.NewContext.
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
scopes := append([]string{}, scope...)
sort.Strings(scopes)
return &appEngineTokenSource{
ctx: ctx,
scopes: scopes,
key: strings.Join(scopes, " "),
}
}
// aeTokens helps the fetched tokens to be reused until their expiration.
var (
aeTokensMu sync.Mutex
aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
)
type tokenLock struct {
mu sync.Mutex // guards t; held while fetching or updating t
t *oauth2.Token
}
type appEngineTokenSource struct {
ctx context.Context
scopes []string
key string // to aeTokens map; space-separated scopes
}
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
aeTokensMu.Lock()
tok, ok := aeTokens[ts.key]
if !ok {
tok = &tokenLock{}
aeTokens[ts.key] = tok
}
aeTokensMu.Unlock()
tok.mu.Lock()
defer tok.mu.Unlock()
if tok.t.Valid() {
return tok.t, nil
}
access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
if err != nil {
return nil, err
}
tok.t = &oauth2.Token{
AccessToken: access,
Expiry: exp,
}
return tok.t, nil
}

View File

@ -1,13 +0,0 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine appenginevm
package google
import "google.golang.org/appengine"
func init() {
appengineTokenFunc = appengine.AccessToken
}

View File

@ -1,154 +0,0 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"runtime"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/cloud/compute/metadata"
)
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
//
// This client should be used when developing services
// that run on Google App Engine or Google Compute Engine
// and use "Application Default Credentials."
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
ts, err := DefaultTokenSource(ctx, scope...)
if err != nil {
return nil, err
}
return oauth2.NewClient(ctx, ts), nil
}
// DefaultTokenSource is a token source that uses
// "Application Default Credentials".
//
// It looks for credentials in the following places,
// preferring the first location found:
//
// 1. A JSON file whose path is specified by the
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
// 3. On Google App Engine it uses the appengine.AccessToken function.
// 4. On Google Compute Engine, it fetches credentials from the metadata server.
// (In this final case any provided scopes are ignored.)
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
ts, err := tokenSourceFromFile(ctx, filename, scope)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
return ts, nil
}
// Second, try a well-known file.
filename := wellKnownFile()
_, err := os.Stat(filename)
if err == nil {
ts, err2 := tokenSourceFromFile(ctx, filename, scope)
if err2 == nil {
return ts, nil
}
err = err2
} else if os.IsNotExist(err) {
err = nil // ignore this error
}
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
// Third, if we're on Google App Engine use those credentials.
if appengineTokenFunc != nil {
return AppEngineTokenSource(ctx, scope...), nil
}
// Fourth, if we're on Google Compute Engine use the metadata server.
if metadata.OnGCE() {
return ComputeTokenSource(""), nil
}
// None are found; return helpful error.
const url = "https://developers.google.com/accounts/docs/application-default-credentials"
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
}
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
}
func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var d struct {
// Common fields
Type string
ClientID string `json:"client_id"`
// User Credential fields
ClientSecret string `json:"client_secret"`
RefreshToken string `json:"refresh_token"`
// Service Account fields
ClientEmail string `json:"client_email"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(b, &d); err != nil {
return nil, err
}
switch d.Type {
case "authorized_user":
cfg := &oauth2.Config{
ClientID: d.ClientID,
ClientSecret: d.ClientSecret,
Scopes: append([]string{}, scopes...), // copy
Endpoint: Endpoint,
}
tok := &oauth2.Token{RefreshToken: d.RefreshToken}
return cfg.TokenSource(ctx, tok), nil
case "service_account":
cfg := &jwt.Config{
Email: d.ClientEmail,
PrivateKey: []byte(d.PrivateKey),
Scopes: append([]string{}, scopes...), // copy
TokenURL: JWTTokenURL,
}
return cfg.TokenSource(ctx), nil
case "":
return nil, errors.New("missing 'type' field in credentials")
default:
return nil, fmt.Errorf("unknown credential type: %q", d.Type)
}
}

Some files were not shown because too many files have changed in this diff