From a67ed67bdcb318d2a3586be37f9d81c632a22d38 Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 16:27:35 -0700
Subject: [PATCH 1/8] Add a health check function for the key DB store that
verifies we can access the required table
Signed-off-by: Ying Li
---
cmd/notary-signer/main.go | 7 ++++++-
signer/keydbstore.go | 14 ++++++++++++++
signer/keydbstore_test.go | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/cmd/notary-signer/main.go b/cmd/notary-signer/main.go
index ef89f59ac1..ef70016b9c 100644
--- a/cmd/notary-signer/main.go
+++ b/cmd/notary-signer/main.go
@@ -13,11 +13,12 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
- _ "github.com/docker/distribution/health"
+ "github.com/docker/distribution/health"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/signer"
"github.com/docker/notary/signer/api"
@@ -147,6 +148,10 @@ func main() {
if err != nil {
log.Fatalf("failed to create a new keydbstore: %v", err)
}
+
+ health.RegisterPeriodicFunc(
+ "DB connectable and valid", keyStore.HealthCheck, time.Second * 60)
+
cryptoService := cryptoservice.NewCryptoService("", keyStore)
cryptoServices[data.ED25519Key] = cryptoService
diff --git a/signer/keydbstore.go b/signer/keydbstore.go
index 72b3e65f1a..68486fbe83 100644
--- a/signer/keydbstore.go
+++ b/signer/keydbstore.go
@@ -195,3 +195,17 @@ func (s *KeyDBStore) RotateKeyPassphrase(name, newPassphraseAlias string) error
return nil
}
+
+// DB should exist and be query-able
+func (s *KeyDBStore) HealthCheck() error {
+ dbPrivateKey := GormPrivateKey{}
+ tableOk := s.db.HasTable(&dbPrivateKey)
+ switch {
+ case s.db.Error != nil:
+ return s.db.Error
+ case !tableOk:
+ return fmt.Errorf(
+ "Cannot access table: %s", dbPrivateKey.TableName())
+ }
+ return nil
+}
diff --git a/signer/keydbstore_test.go b/signer/keydbstore_test.go
index 10e798a5c6..0a9a22df31 100644
--- a/signer/keydbstore_test.go
+++ b/signer/keydbstore_test.go
@@ -27,6 +27,7 @@ var anotherRetriever = func(keyName, alias string, createNew bool, attempts int)
return "", false, errors.New("password alias no found")
}
+
func TestCreateRead(t *testing.T) {
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
defer os.RemoveAll(tempBaseDir)
@@ -161,3 +162,36 @@ func TestKeyRotation(t *testing.T) {
err = dbStore.RotateKeyPassphrase(testKey.ID(), "alias_3")
assert.Error(t, err, "password alias no found")
}
+
+
+func TestDBHealthCheck(t *testing.T) {
+ tempBaseDir, err := ioutil.TempDir("", "notary-test-")
+ defer os.RemoveAll(tempBaseDir)
+
+ // We are using SQLite for the tests
+ db, err := sql.Open("sqlite3", tempBaseDir+"test_db")
+ assert.NoError(t, err)
+
+ // Create a new KeyDB store
+ dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db)
+ assert.NoError(t, err)
+
+ // No key table, health check fails
+ err = dbStore.HealthCheck()
+ assert.Error(t, err, "Cannot access table:")
+
+ // Ensure that the private_key table exists
+ dbStore.db.CreateTable(&GormPrivateKey{})
+
+ // Health check succeeds because the table exists
+ err = dbStore.HealthCheck()
+ assert.NoError(t, err)
+
+ // Close the connection
+ err = dbStore.db.Close()
+ assert.NoError(t, err)
+
+ // Health check fails because the connection is closed
+ err = dbStore.HealthCheck()
+ assert.Error(t, err, "Cannot access table:")
+}
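
The health check added above is just a func() error, so it can also be exercised outside of docker/distribution's health package. Below is a small, self-contained sketch (not part of the patch series) of a poller with the same shape as the RegisterPeriodicFunc call in main.go; the fakeCheck closure is a stand-in for keyStore.HealthCheck.

package main

import (
	"errors"
	"log"
	"time"
)

// pollHealth runs any func() error check on a fixed period and logs the
// outcome, approximating what the periodic registration in main.go does.
func pollHealth(name string, check func() error, period time.Duration) {
	for range time.Tick(period) {
		if err := check(); err != nil {
			log.Printf("health check %q failed: %v", name, err)
			continue
		}
		log.Printf("health check %q ok", name)
	}
}

func main() {
	// Stand-in for keyStore.HealthCheck; always fails for demonstration.
	fakeCheck := func() error { return errors.New("table not reachable") }
	go pollHealth("DB connectable and valid", fakeCheck, 60*time.Second)
	select {}
}
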
From 74a5b1b5414d20f0985b404e745b6a53b086dbfa Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 18:12:23 -0700
Subject: [PATCH 2/8] Change the signer protocol to include a health check
Signed-off-by: Ying Li
---
proto/signer.pb.go | 105 +++++++++++++++++++++++++++++++++++++++------
proto/signer.proto | 11 +++++
2 files changed, 103 insertions(+), 13 deletions(-)
diff --git a/proto/signer.pb.go b/proto/signer.pb.go
index c7ccd824ff..bf74faadab 100644
--- a/proto/signer.pb.go
+++ b/proto/signer.pb.go
@@ -16,22 +16,23 @@ It has these top-level messages:
Signature
SignatureRequest
Void
+ HealthStatus
*/
package proto
import proto1 "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto1.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
// KeyInfo holds a KeyID that is used to reference the key and it's algorithm
type KeyInfo struct {
@@ -59,7 +60,7 @@ func (m *KeyInfo) GetAlgorithm() *Algorithm {
// KeyID holds an ID that is used to reference the key
type KeyID struct {
- ID string `protobuf:"bytes,1,opt" json:"ID,omitempty"`
+ ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"`
}
func (m *KeyID) Reset() { *m = KeyID{} }
@@ -142,6 +143,26 @@ func (m *Void) Reset() { *m = Void{} }
func (m *Void) String() string { return proto1.CompactTextString(m) }
func (*Void) ProtoMessage() {}
+// A mapping of health check name to the check result message
+type HealthStatus struct {
+ Status map[string]string `protobuf:"bytes,1,rep,name=status" json:"status,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *HealthStatus) Reset() { *m = HealthStatus{} }
+func (m *HealthStatus) String() string { return proto1.CompactTextString(m) }
+func (*HealthStatus) ProtoMessage() {}
+
+func (m *HealthStatus) GetStatus() map[string]string {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
// Client API for KeyManagement service
type KeyManagementClient interface {
@@ -151,6 +172,8 @@ type KeyManagementClient interface {
DeleteKey(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*Void, error)
// GetKeyInfo returns the PublicKey associated with a KeyID
GetKeyInfo(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*PublicKey, error)
+ // CheckHealth returns the HealthStatus with the service
+ CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error)
}
type keyManagementClient struct {
@@ -188,6 +211,15 @@ func (c *keyManagementClient) GetKeyInfo(ctx context.Context, in *KeyID, opts ..
return out, nil
}
+func (c *keyManagementClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) {
+ out := new(HealthStatus)
+ err := grpc.Invoke(ctx, "/proto.KeyManagement/CheckHealth", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for KeyManagement service
type KeyManagementServer interface {
@@ -197,15 +229,17 @@ type KeyManagementServer interface {
DeleteKey(context.Context, *KeyID) (*Void, error)
// GetKeyInfo returns the PublicKey associated with a KeyID
GetKeyInfo(context.Context, *KeyID) (*PublicKey, error)
+ // CheckHealth returns the HealthStatus with the service
+ CheckHealth(context.Context, *Void) (*HealthStatus, error)
}
func RegisterKeyManagementServer(s *grpc.Server, srv KeyManagementServer) {
s.RegisterService(&_KeyManagement_serviceDesc, srv)
}
-func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(Algorithm)
- if err := codec.Unmarshal(buf, in); err != nil {
+ if err := dec(in); err != nil {
return nil, err
}
out, err := srv.(KeyManagementServer).CreateKey(ctx, in)
@@ -215,9 +249,9 @@ func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, code
return out, nil
}
-func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(KeyID)
- if err := codec.Unmarshal(buf, in); err != nil {
+ if err := dec(in); err != nil {
return nil, err
}
out, err := srv.(KeyManagementServer).DeleteKey(ctx, in)
@@ -227,9 +261,9 @@ func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, code
return out, nil
}
-func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(KeyID)
- if err := codec.Unmarshal(buf, in); err != nil {
+ if err := dec(in); err != nil {
return nil, err
}
out, err := srv.(KeyManagementServer).GetKeyInfo(ctx, in)
@@ -239,6 +273,18 @@ func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, cod
return out, nil
}
+func _KeyManagement_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
+ in := new(Void)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ out, err := srv.(KeyManagementServer).CheckHealth(ctx, in)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
var _KeyManagement_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.KeyManagement",
HandlerType: (*KeyManagementServer)(nil),
@@ -255,6 +301,10 @@ var _KeyManagement_serviceDesc = grpc.ServiceDesc{
MethodName: "GetKeyInfo",
Handler: _KeyManagement_GetKeyInfo_Handler,
},
+ {
+ MethodName: "CheckHealth",
+ Handler: _KeyManagement_CheckHealth_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
}
@@ -264,6 +314,8 @@ var _KeyManagement_serviceDesc = grpc.ServiceDesc{
type SignerClient interface {
// Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature
Sign(ctx context.Context, in *SignatureRequest, opts ...grpc.CallOption) (*Signature, error)
+ // CheckHealth returns the HealthStatus with the service
+ CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error)
}
type signerClient struct {
@@ -283,20 +335,31 @@ func (c *signerClient) Sign(ctx context.Context, in *SignatureRequest, opts ...g
return out, nil
}
+func (c *signerClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) {
+ out := new(HealthStatus)
+ err := grpc.Invoke(ctx, "/proto.Signer/CheckHealth", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for Signer service
type SignerServer interface {
// Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature
Sign(context.Context, *SignatureRequest) (*Signature, error)
+ // CheckHealth returns the HealthStatus with the service
+ CheckHealth(context.Context, *Void) (*HealthStatus, error)
}
func RegisterSignerServer(s *grpc.Server, srv SignerServer) {
s.RegisterService(&_Signer_serviceDesc, srv)
}
-func _Signer_Sign_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) {
+func _Signer_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
in := new(SignatureRequest)
- if err := codec.Unmarshal(buf, in); err != nil {
+ if err := dec(in); err != nil {
return nil, err
}
out, err := srv.(SignerServer).Sign(ctx, in)
@@ -306,6 +369,18 @@ func _Signer_Sign_Handler(srv interface{}, ctx context.Context, codec grpc.Codec
return out, nil
}
+func _Signer_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
+ in := new(Void)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ out, err := srv.(SignerServer).CheckHealth(ctx, in)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
var _Signer_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.Signer",
HandlerType: (*SignerServer)(nil),
@@ -314,6 +389,10 @@ var _Signer_serviceDesc = grpc.ServiceDesc{
MethodName: "Sign",
Handler: _Signer_Sign_Handler,
},
+ {
+ MethodName: "CheckHealth",
+ Handler: _Signer_CheckHealth_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
}
diff --git a/proto/signer.proto b/proto/signer.proto
index 5278bc26cb..5421988b8d 100644
--- a/proto/signer.proto
+++ b/proto/signer.proto
@@ -13,12 +13,18 @@ service KeyManagement {
// GetKeyInfo returns the PublicKey associated with a KeyID
rpc GetKeyInfo(KeyID) returns (PublicKey) {}
+
+ // CheckHealth returns the HealthStatus with the service
+ rpc CheckHealth(Void) returns (HealthStatus) {}
}
// Signer Interface
service Signer {
// Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature
rpc Sign(SignatureRequest) returns (Signature) {}
+
+ // CheckHealth returns the HealthStatus with the service
+ rpc CheckHealth(Void) returns (HealthStatus) {}
}
// KeyInfo holds a KeyID that is used to reference the key and it's algorithm
@@ -59,3 +65,8 @@ message SignatureRequest {
// Void represents an empty message type
message Void {
}
+
+// A mapping of health check name to the check result message
+message HealthStatus {
+ map<string, string> status = 1;
+}
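
With the CheckHealth RPC and HealthStatus message in place, a client can query either service over the same connection it already uses for signing. A minimal client-side sketch follows; the address, the insecure dial option, and the import alias are illustrative only (the real signer listens with TLS), and it assumes the generated package from this patch is importable as pb.

package main

import (
	"fmt"
	"log"

	pb "github.com/docker/notary/proto"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewSignerClient(conn)
	status, err := client.CheckHealth(context.Background(), &pb.Void{})
	if err != nil {
		log.Fatalf("health check RPC failed: %v", err)
	}
	// Status holds the map<string, string> of check name -> result message.
	for name, msg := range status.Status {
		fmt.Printf("%s: %s\n", name, msg)
	}
}
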
From 701e5df79efadd4245d955e6b2dc89e83eb57171 Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 18:12:51 -0700
Subject: [PATCH 3/8] Change the rpc Server objects to take a function that can
check health
Signed-off-by: Ying Li
---
signer/api/rpc_api.go | 20 ++++++++++++++++++++
signer/api/rpc_api_test.go | 13 +++++++++++--
2 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/signer/api/rpc_api.go b/signer/api/rpc_api.go
index de1090c30e..626dabe29a 100644
--- a/signer/api/rpc_api.go
+++ b/signer/api/rpc_api.go
@@ -18,11 +18,13 @@ import (
//KeyManagementServer implements the KeyManagementServer grpc interface
type KeyManagementServer struct {
CryptoServices signer.CryptoServiceIndex
+ HealthChecker func() map[string]string
}
//SignerServer implements the SignerServer grpc interface
type SignerServer struct {
CryptoServices signer.CryptoServiceIndex
+ HealthChecker func() map[string]string
}
//CreateKey returns a PublicKey created using KeyManagementServer's SigningService
@@ -106,6 +108,15 @@ func (s *KeyManagementServer) GetKeyInfo(ctx context.Context, keyID *pb.KeyID) (
}, nil
}
+//CheckHealth returns the HealthStatus with the service
+func (s *KeyManagementServer) CheckHealth(ctx context.Context) (*pb.HealthStatus, error) {
+
+ logger.Debug("CheckHealth: Returning HealthStatus for KeyManagementServer")
+ return &pb.HealthStatus{
+ Status: s.HealthChecker(),
+ }, nil
+}
+
//Sign signs a message and returns the signature using a private key associate with the KeyID from the SignatureRequest
func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.Signature, error) {
tufKey, service, err := FindKeyByID(s.CryptoServices, sr.KeyID)
@@ -136,3 +147,12 @@ func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.S
return signature, nil
}
+
+//CheckHealth returns the HealthStatus with the service
+func (s *SignerServer) CheckHealth(ctx context.Context) (*pb.HealthStatus, error) {
+
+ logger.Debug("CheckHealth: Returning HealthStatus for SignerServer")
+ return &pb.HealthStatus{
+ Status: s.HealthChecker(),
+ }, nil
+}
diff --git a/signer/api/rpc_api_test.go b/signer/api/rpc_api_test.go
index 4021c7dae9..3f2c2fed4e 100644
--- a/signer/api/rpc_api_test.go
+++ b/signer/api/rpc_api_test.go
@@ -26,6 +26,10 @@ var (
grpcServer *grpc.Server
void *pb.Void
pr passphrase.Retriever
+ health= map[string]string {
+ "db": "ok",
+ "other": "not ok",
+ }
)
func init() {
@@ -52,8 +56,13 @@ func init() {
if err != nil {
log.Fatalf("fail to dial: %v", err)
}
- kmClient = pb.NewKeyManagementClient(conn)
- sClient = pb.NewSignerClient(conn)
+
+ fakeHealth := func() map[string]string {
+ return health
+ }
+
+ kmClient = pb.NewKeyManagementClient(conn, fakeHealth)
+ sClient = pb.NewSignerClient(conn, fakeHealth)
}
func TestDeleteKeyHandlerReturnsNotFoundWithNonexistentKey(t *testing.T) {
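
The servers now only require a HealthChecker of type func() map[string]string, so any set of named checks can be folded into that shape. The aggregator below is one possible way to build such a function; the check names and the "ok" convention are illustrative, not necessarily what notary-signer ends up wiring in.

package main

import (
	"errors"
	"fmt"
)

// aggregate turns a set of named func() error checks into the
// func() map[string]string the RPC servers expect, recording either
// "ok" or the error message for each check.
func aggregate(checks map[string]func() error) func() map[string]string {
	return func() map[string]string {
		status := make(map[string]string, len(checks))
		for name, check := range checks {
			if err := check(); err != nil {
				status[name] = err.Error()
				continue
			}
			status[name] = "ok"
		}
		return status
	}
}

func main() {
	healthChecker := aggregate(map[string]func() error{
		"db":    func() error { return nil },
		"other": func() error { return errors.New("not ok") },
	})
	fmt.Println(healthChecker()) // e.g. map[db:ok other:not ok]
}
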
From 75516a1e84cb8d11bfeb5e4e89718d6b99760782 Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 18:41:00 -0700
Subject: [PATCH 4/8] Add the CheckHealth implementations to the RPC servers,
go fmt some other files
Signed-off-by: Ying Li
---
cmd/notary-signer/main.go | 6 ++---
signer/api/rpc_api.go | 26 +++++++++++----------
signer/api/rpc_api_test.go | 39 +++++++++++++++++++++-----------
signer/keydbstore_test.go | 46 ++++++++++++++++++--------------------
4 files changed, 65 insertions(+), 52 deletions(-)
diff --git a/cmd/notary-signer/main.go b/cmd/notary-signer/main.go
index ef70016b9c..3ca640c522 100644
--- a/cmd/notary-signer/main.go
+++ b/cmd/notary-signer/main.go
@@ -13,7 +13,7 @@ import (
"os"
"path/filepath"
"strings"
- "time"
+ "time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -149,8 +149,8 @@ func main() {
log.Fatalf("failed to create a new keydbstore: %v", err)
}
- health.RegisterPeriodicFunc(
- "DB connectable and valid", keyStore.HealthCheck, time.Second * 60)
+ health.RegisterPeriodicFunc(
+ "DB connectable and valid", keyStore.HealthCheck, time.Second*60)
cryptoService := cryptoservice.NewCryptoService("", keyStore)
diff --git a/signer/api/rpc_api.go b/signer/api/rpc_api.go
index 626dabe29a..d810fbe271 100644
--- a/signer/api/rpc_api.go
+++ b/signer/api/rpc_api.go
@@ -18,13 +18,13 @@ import (
//KeyManagementServer implements the KeyManagementServer grpc interface
type KeyManagementServer struct {
CryptoServices signer.CryptoServiceIndex
- HealthChecker func() map[string]string
+ HealthChecker func() map[string]string
}
//SignerServer implements the SignerServer grpc interface
type SignerServer struct {
CryptoServices signer.CryptoServiceIndex
- HealthChecker func() map[string]string
+ HealthChecker func() map[string]string
}
//CreateKey returns a PublicKey created using KeyManagementServer's SigningService
@@ -109,12 +109,13 @@ func (s *KeyManagementServer) GetKeyInfo(ctx context.Context, keyID *pb.KeyID) (
}
//CheckHealth returns the HealthStatus with the service
-func (s *KeyManagementServer) CheckHealth(ctx context.Context) (*pb.HealthStatus, error) {
+func (s *KeyManagementServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) {
+ logger := ctxu.GetLogger(ctx)
+ logger.Debug("CheckHealth: Returning HealthStatus for KeyManagementServer")
- logger.Debug("CheckHealth: Returning HealthStatus for KeyManagementServer")
- return &pb.HealthStatus{
- Status: s.HealthChecker(),
- }, nil
+ return &pb.HealthStatus{
+ Status: s.HealthChecker(),
+ }, nil
}
//Sign signs a message and returns the signature using a private key associate with the KeyID from the SignatureRequest
@@ -149,10 +150,11 @@ func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.S
}
//CheckHealth returns the HealthStatus with the service
-func (s *SignerServer) CheckHealth(ctx context.Context) (*pb.HealthStatus, error) {
+func (s *SignerServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) {
+ logger := ctxu.GetLogger(ctx)
+ logger.Debug("CheckHealth: Returning HealthStatus for SignerServer")
- logger.Debug("CheckHealth: Returning HealthStatus for SignerServer")
- return &pb.HealthStatus{
- Status: s.HealthChecker(),
- }, nil
+ return &pb.HealthStatus{
+ Status: s.HealthChecker(),
+ }, nil
}
diff --git a/signer/api/rpc_api_test.go b/signer/api/rpc_api_test.go
index 3f2c2fed4e..9734af35ed 100644
--- a/signer/api/rpc_api_test.go
+++ b/signer/api/rpc_api_test.go
@@ -26,10 +26,10 @@ var (
grpcServer *grpc.Server
void *pb.Void
pr passphrase.Retriever
- health= map[string]string {
- "db": "ok",
- "other": "not ok",
- }
+ health = map[string]string{
+ "db": "ok",
+ "other": "not ok",
+ }
)
func init() {
@@ -38,9 +38,16 @@ func init() {
cryptoService := cryptoservice.NewCryptoService("", keyStore)
cryptoServices := signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}
void = &pb.Void{}
+
+ fakeHealth := func() map[string]string {
+ return health
+ }
+
//server setup
- kms := &api.KeyManagementServer{CryptoServices: cryptoServices}
- ss := &api.SignerServer{CryptoServices: cryptoServices}
+ kms := &api.KeyManagementServer{CryptoServices: cryptoServices,
+ HealthChecker: fakeHealth}
+ ss := &api.SignerServer{CryptoServices: cryptoServices,
+ HealthChecker: fakeHealth}
grpcServer = grpc.NewServer()
pb.RegisterKeyManagementServer(grpcServer, kms)
pb.RegisterSignerServer(grpcServer, ss)
@@ -52,17 +59,13 @@ func init() {
go grpcServer.Serve(lis)
//client setup
- conn, err := grpc.Dial("127.0.0.1:7899")
+ conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure())
if err != nil {
log.Fatalf("fail to dial: %v", err)
}
- fakeHealth := func() map[string]string {
- return health
- }
-
- kmClient = pb.NewKeyManagementClient(conn, fakeHealth)
- sClient = pb.NewSignerClient(conn, fakeHealth)
+ kmClient = pb.NewKeyManagementClient(conn)
+ sClient = pb.NewSignerClient(conn)
}
func TestDeleteKeyHandlerReturnsNotFoundWithNonexistentKey(t *testing.T) {
@@ -149,3 +152,13 @@ func TestSignReturnsNotFoundOnNonexistKeys(t *testing.T) {
assert.Equal(t, grpc.Code(err), codes.NotFound)
assert.Nil(t, ret)
}
+
+func TestHealthChecksForServices(t *testing.T) {
+ sHealthStatus, err := sClient.CheckHealth(context.Background(), void)
+ assert.Nil(t, err)
+ assert.Equal(t, health, sHealthStatus.Status)
+
+ kmHealthStatus, err := kmClient.CheckHealth(context.Background(), void)
+ assert.Nil(t, err)
+ assert.Equal(t, health, kmHealthStatus.Status)
+}
diff --git a/signer/keydbstore_test.go b/signer/keydbstore_test.go
index 0a9a22df31..8e40a8be60 100644
--- a/signer/keydbstore_test.go
+++ b/signer/keydbstore_test.go
@@ -27,7 +27,6 @@ var anotherRetriever = func(keyName, alias string, createNew bool, attempts int)
return "", false, errors.New("password alias no found")
}
-
func TestCreateRead(t *testing.T) {
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
defer os.RemoveAll(tempBaseDir)
@@ -163,35 +162,34 @@ func TestKeyRotation(t *testing.T) {
assert.Error(t, err, "password alias no found")
}
-
func TestDBHealthCheck(t *testing.T) {
- tempBaseDir, err := ioutil.TempDir("", "notary-test-")
- defer os.RemoveAll(tempBaseDir)
+ tempBaseDir, err := ioutil.TempDir("", "notary-test-")
+ defer os.RemoveAll(tempBaseDir)
- // We are using SQLite for the tests
- db, err := sql.Open("sqlite3", tempBaseDir+"test_db")
- assert.NoError(t, err)
+ // We are using SQLite for the tests
+ db, err := sql.Open("sqlite3", tempBaseDir+"test_db")
+ assert.NoError(t, err)
- // Create a new KeyDB store
- dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db)
- assert.NoError(t, err)
+ // Create a new KeyDB store
+ dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db)
+ assert.NoError(t, err)
- // No key table, health check fails
- err = dbStore.HealthCheck()
- assert.Error(t, err, "Cannot access table:")
+ // No key table, health check fails
+ err = dbStore.HealthCheck()
+ assert.Error(t, err, "Cannot access table:")
- // Ensure that the private_key table exists
- dbStore.db.CreateTable(&GormPrivateKey{})
+ // Ensure that the private_key table exists
+ dbStore.db.CreateTable(&GormPrivateKey{})
- // Health check succeeds because the table exists
- err = dbStore.HealthCheck()
- assert.NoError(t, err)
+ // Health check succeeds because the table exists
+ err = dbStore.HealthCheck()
+ assert.NoError(t, err)
- // Close the connection
- err = dbStore.db.Close()
- assert.NoError(t, err)
+ // Close the connection
+ err = dbStore.db.Close()
+ assert.NoError(t, err)
- // Health check fails because the connection is closed
- err = dbStore.HealthCheck()
- assert.Error(t, err, "Cannot access table:")
+ // Health check fails because the connection is closed
+ err = dbStore.HealthCheck()
+ assert.Error(t, err, "Cannot access table:")
}
From 815675eea9b6ba9d0bb0afae7e7be8761e829857 Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 20:52:48 -0700
Subject: [PATCH 5/8] After upgrading grpc, update how the server is set up
with TLS for the signer
Signed-off-by: Ying Li
---
cmd/notary-signer/main.go | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/cmd/notary-signer/main.go b/cmd/notary-signer/main.go
index 3ca640c522..052a540820 100644
--- a/cmd/notary-signer/main.go
+++ b/cmd/notary-signer/main.go
@@ -161,10 +161,6 @@ func main() {
kms := &api.KeyManagementServer{CryptoServices: cryptoServices}
ss := &api.SignerServer{CryptoServices: cryptoServices}
- grpcServer := grpc.NewServer()
- pb.RegisterKeyManagementServer(grpcServer, kms)
- pb.RegisterSignerServer(grpcServer, ss)
-
rpcAddr := viper.GetString("server.grpc_addr")
lis, err := net.Listen("tcp", rpcAddr)
if err != nil {
@@ -174,7 +170,13 @@ func main() {
if err != nil {
log.Fatalf("failed to generate credentials %v", err)
}
- go grpcServer.Serve(creds.NewListener(lis))
+ opts := []grpc.ServerOption{grpc.Creds(creds)}
+ grpcServer := grpc.NewServer(opts...)
+
+ pb.RegisterKeyManagementServer(grpcServer, kms)
+ pb.RegisterSignerServer(grpcServer, ss)
+
+ go grpcServer.Serve(lis)
httpAddr := viper.GetString("server.http_addr")
if httpAddr == "" {
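
For reference, the new-style construction this patch moves to looks roughly like the following when pulled out of main.go: TLS is passed as a grpc.ServerOption instead of wrapping the listener. The certificate paths and listen address are placeholders, and the Register* calls are elided.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	lis, err := net.Listen("tcp", ":7899")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	creds, err := credentials.NewServerTLSFromFile("signer.crt", "signer.key")
	if err != nil {
		log.Fatalf("failed to generate credentials: %v", err)
	}

	grpcServer := grpc.NewServer(grpc.Creds(creds))
	// pb.RegisterKeyManagementServer(grpcServer, kms) and
	// pb.RegisterSignerServer(grpcServer, ss) would be called here, as in the patch.

	log.Fatal(grpcServer.Serve(lis))
}
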
From 2740fd011a800071de2a744c47a5bd02c2015afb Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 21:00:01 -0700
Subject: [PATCH 6/8] Fix docstring comment
Signed-off-by: Ying Li
---
signer/keydbstore.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/signer/keydbstore.go b/signer/keydbstore.go
index 68486fbe83..a7fa5aa1ca 100644
--- a/signer/keydbstore.go
+++ b/signer/keydbstore.go
@@ -196,7 +196,7 @@ func (s *KeyDBStore) RotateKeyPassphrase(name, newPassphraseAlias string) error
return nil
}
-// DB should exist and be query-able
+// HealthCheck verifies that DB exists and is query-able
func (s *KeyDBStore) HealthCheck() error {
dbPrivateKey := GormPrivateKey{}
tableOk := s.db.HasTable(&dbPrivateKey)
From 77fa33f73721441e291c7fcf8ad2b325244d44f2 Mon Sep 17 00:00:00 2001
From: Ying Li
Date: Wed, 7 Oct 2015 21:00:20 -0700
Subject: [PATCH 7/8] Update godeps
Signed-off-by: Ying Li
---
Godeps/Godeps.json | 24 +-
.../src/github.com/bradfitz/http2/.gitignore | 1 -
.../src/github.com/bradfitz/http2/AUTHORS | 19 -
.../github.com/bradfitz/http2/CONTRIBUTORS | 19 -
.../src/github.com/bradfitz/http2/HACKING | 5 -
.../src/github.com/bradfitz/http2/LICENSE | 7 -
.../github.com/bradfitz/http2/buffer_test.go | 73 -
.../github.com/bradfitz/http2/errors_test.go | 27 -
.../github.com/bradfitz/http2/flow_test.go | 54 -
.../github.com/bradfitz/http2/frame_test.go | 578 --
.../github.com/bradfitz/http2/gotrack_test.go | 33 -
.../bradfitz/http2/hpack/encode_test.go | 331 --
.../bradfitz/http2/hpack/hpack_test.go | 648 ---
.../github.com/bradfitz/http2/http2_test.go | 152 -
.../github.com/bradfitz/http2/pipe_test.go | 24 -
.../bradfitz/http2/priority_test.go | 121 -
.../github.com/bradfitz/http2/server_test.go | 2252 --------
.../testdata/draft-ietf-httpbis-http2.xml | 5021 -----------------
.../bradfitz/http2/transport_test.go | 168 -
.../github.com/bradfitz/http2/z_spec_test.go | 357 --
.../golang/protobuf/proto/all_test.go | 2071 -------
.../github.com/golang/protobuf/proto/clone.go | 38 +-
.../golang/protobuf/proto/clone_test.go | 227 -
.../golang/protobuf/proto/decode.go | 52 +-
.../golang/protobuf/proto/encode.go | 68 +-
.../github.com/golang/protobuf/proto/equal.go | 11 +
.../golang/protobuf/proto/equal_test.go | 191 -
.../golang/protobuf/proto/extensions.go | 42 +-
.../golang/protobuf/proto/extensions_test.go | 153 -
.../github.com/golang/protobuf/proto/lib.go | 647 ++-
.../golang/protobuf/proto/message_set_test.go | 66 -
.../golang/protobuf/proto/pointer_reflect.go | 4 +-
.../golang/protobuf/proto/pointer_unsafe.go | 4 +-
.../golang/protobuf/proto/properties.go | 65 +-
.../protobuf/proto/proto3_proto/proto3.pb.go | 122 +
.../golang/protobuf/proto/proto3_test.go | 125 -
.../golang/protobuf/proto/size_test.go | 142 -
.../golang/protobuf/proto/testdata/Makefile | 50 -
.../protobuf/proto/testdata/golden_test.go | 86 -
.../golang/protobuf/proto/testdata/test.pb.go | 2397 --------
.../golang/protobuf/proto/testdata/test.proto | 435 --
.../github.com/golang/protobuf/proto/text.go | 80 +-
.../golang/protobuf/proto/text_parser.go | 199 +-
.../golang/protobuf/proto/text_parser_test.go | 511 --
.../golang/protobuf/proto/text_test.go | 436 --
.../src/golang.org/x/net/context/context.go | 67 +-
.../golang.org/x/net/context/context_test.go | 553 --
.../x/net/context/ctxhttp/cancelreq.go | 18 +
.../x/net/context/ctxhttp/cancelreq_go14.go | 23 +
.../x/net/context/ctxhttp/ctxhttp.go | 79 +
.../x/net/context/withtimeout_test.go | 26 -
.../src/golang.org/x/net/http2/.gitignore | 2 +
.../x/net}/http2/Dockerfile | 0
.../x/net}/http2/Makefile | 0
.../x/net}/http2/README | 7 +-
.../x/net}/http2/buffer.go | 7 +-
.../x/net}/http2/errors.go | 0
.../x/net}/http2/flow.go | 0
.../x/net}/http2/frame.go | 4 +-
.../x/net}/http2/gotrack.go | 6 +-
.../x/net}/http2/h2demo/.gitignore | 0
.../x/net}/http2/h2demo/Makefile | 0
.../x/net}/http2/h2demo/README | 0
.../x/net}/http2/h2demo/h2demo.go | 21 +-
.../x/net}/http2/h2demo/launch.go | 61 +-
.../x/net}/http2/h2demo/rootCA.key | 0
.../x/net}/http2/h2demo/rootCA.pem | 0
.../x/net}/http2/h2demo/rootCA.srl | 0
.../x/net}/http2/h2demo/server.crt | 0
.../x/net}/http2/h2demo/server.key | 0
.../src/golang.org/x/net/http2/h2i/README.md | 97 +
.../src/golang.org/x/net/http2/h2i/h2i.go | 489 ++
.../x/net}/http2/headermap.go | 0
.../x/net}/http2/hpack/encode.go | 0
.../x/net}/http2/hpack/hpack.go | 0
.../x/net}/http2/hpack/huffman.go | 0
.../x/net}/http2/hpack/tables.go | 0
.../x/net}/http2/http2.go | 0
.../x/net}/http2/pipe.go | 0
.../x/net}/http2/server.go | 7 +-
.../x/net}/http2/transport.go | 4 +-
.../x/net}/http2/write.go | 2 +-
.../x/net}/http2/writesched.go | 0
.../x/net/internal/timeseries/timeseries.go | 525 ++
.../src/golang.org/x/net/trace/events.go | 524 ++
.../src/golang.org/x/net/trace/histogram.go | 356 ++
.../src/golang.org/x/net/trace/trace.go | 1057 ++++
.../src/golang.org/x/oauth2/.travis.yml | 14 -
.../src/golang.org/x/oauth2/AUTHORS | 3 -
.../src/golang.org/x/oauth2/CONTRIBUTING.md | 31 -
.../src/golang.org/x/oauth2/CONTRIBUTORS | 3 -
.../src/golang.org/x/oauth2/LICENSE | 27 -
.../src/golang.org/x/oauth2/README.md | 64 -
.../golang.org/x/oauth2/client_appengine.go | 24 -
.../src/golang.org/x/oauth2/example_test.go | 45 -
.../golang.org/x/oauth2/facebook/facebook.go | 16 -
.../src/golang.org/x/oauth2/github/github.go | 16 -
.../golang.org/x/oauth2/google/appengine.go | 83 -
.../x/oauth2/google/appengine_hook.go | 13 -
.../src/golang.org/x/oauth2/google/default.go | 154 -
.../x/oauth2/google/example_test.go | 150 -
.../src/golang.org/x/oauth2/google/google.go | 145 -
.../golang.org/x/oauth2/google/google_test.go | 67 -
.../src/golang.org/x/oauth2/google/sdk.go | 168 -
.../golang.org/x/oauth2/google/sdk_test.go | 46 -
.../oauth2/google/testdata/gcloud/credentials | 122 -
.../oauth2/google/testdata/gcloud/properties | 2 -
.../golang.org/x/oauth2/internal/oauth2.go | 69 -
.../x/oauth2/internal/oauth2_test.go | 62 -
.../src/golang.org/x/oauth2/jws/jws.go | 160 -
.../golang.org/x/oauth2/jwt/example_test.go | 31 -
.../src/golang.org/x/oauth2/jwt/jwt.go | 147 -
.../src/golang.org/x/oauth2/jwt/jwt_test.go | 134 -
.../golang.org/x/oauth2/linkedin/linkedin.go | 16 -
.../src/golang.org/x/oauth2/oauth2.go | 523 --
.../src/golang.org/x/oauth2/oauth2_test.go | 439 --
.../x/oauth2/odnoklassniki/odnoklassniki.go | 16 -
.../src/golang.org/x/oauth2/paypal/paypal.go | 22 -
.../src/golang.org/x/oauth2/token.go | 104 -
.../src/golang.org/x/oauth2/token_test.go | 50 -
.../src/golang.org/x/oauth2/transport.go | 138 -
.../src/golang.org/x/oauth2/transport_test.go | 53 -
.../src/golang.org/x/oauth2/vk/vk.go | 16 -
.../cloud/compute/metadata/go13.go | 37 -
.../cloud/compute/metadata/metadata.go | 267 -
.../google.golang.org/cloud/internal/cloud.go | 128 -
.../internal/datastore/datastore_v1.pb.go | 1633 ------
.../internal/datastore/datastore_v1.proto | 594 --
.../cloud/internal/testutil/context.go | 57 -
.../src/google.golang.org/grpc/.travis.yml | 14 +-
.../google.golang.org/grpc/CONTRIBUTING.md | 4 -
.../{ => Documentation}/grpc-auth-support.md | 11 +-
.../src/google.golang.org/grpc/Makefile | 50 +
.../src/google.golang.org/grpc/README.md | 17 +-
.../grpc/benchmark/benchmark.go | 132 +-
.../grpc/benchmark/benchmark_test.go | 74 -
.../grpc/benchmark/client/main.go | 161 +
.../grpc/benchmark/grpc_testing/test.pb.go | 913 ++-
.../grpc/benchmark/grpc_testing/test.proto | 240 +-
.../grpc/benchmark/server/main.go | 35 +
.../grpc/benchmark/stats/counter.go | 4 +-
.../grpc/benchmark/stats/histogram.go | 2 +-
.../src/google.golang.org/grpc/call.go | 48 +-
.../src/google.golang.org/grpc/clientconn.go | 360 +-
.../src/google.golang.org/grpc/codegen.sh | 2 +-
.../grpc/credentials/credentials.go | 173 +-
.../grpc/credentials/oauth/oauth.go | 177 +
.../google.golang.org/grpc/examples/README.md | 57 +
.../grpc/examples/gotutorial.md | 431 ++
.../helloworld/greeter_client/main.go} | 69 +-
.../helloworld/greeter_server/main.go | 65 +
.../helloworld/helloworld/helloworld.pb.go | 109 +
.../helloworld/helloworld/helloworld.proto} | 52 +-
.../grpc/examples/route_guide/README.md | 2 +-
.../examples/route_guide/client/client.go | 42 +-
.../{proto => routeguide}/route_guide.pb.go | 6 +-
.../{proto => routeguide}/route_guide.proto | 2 +-
.../examples/route_guide/server/server.go | 24 +-
.../grpc/examples/route_guide/testdata/ca.pem | 14 -
.../route_guide/testdata/route_guide_db.json | 601 --
.../examples/route_guide/testdata/server1.key | 15 -
.../examples/route_guide/testdata/server1.pem | 16 -
.../grpc/grpclog/glogger/glogger.go | 72 +
.../google.golang.org/grpc/grpclog/logger.go | 90 +
.../health/grpc_health_v1alpha/health.pb.go | 133 +
.../health/grpc_health_v1alpha/health.proto | 20 +
.../google.golang.org/grpc/health/health.go | 49 +
.../grpc/interop/client/client.go | 288 +-
.../grpc/interop/client/testdata/ca.pem | 14 -
.../grpc/interop/client/testdata/server1.key | 15 -
.../grpc/interop/client/testdata/server1.pem | 16 -
.../grpc/interop/grpc_testing/test.pb.go | 12 +-
.../grpc/interop/server/server.go | 18 +-
.../grpc/interop/server/testdata/ca.pem | 14 -
.../grpc/interop/server/testdata/server1.key | 15 -
.../grpc/interop/server/testdata/server1.pem | 16 -
.../grpc/metadata/metadata.go | 12 +-
.../grpc/naming/etcd/etcd.go | 158 +
.../http_util_test.go => naming/naming.go} | 83 +-
.../src/google.golang.org/grpc/picker.go | 90 +
.../src/google.golang.org/grpc/rpc_util.go | 37 +-
.../google.golang.org/grpc/rpc_util_test.go | 211 -
.../src/google.golang.org/grpc/server.go | 224 +-
.../src/google.golang.org/grpc/stream.go | 131 +-
.../grpc/test/end2end_test.go | 811 ---
.../grpc/test/grpc_testing/test.pb.go | 12 +-
.../grpc/test/testdata/ca.pem | 14 -
.../grpc/test/testdata/server1.key | 15 -
.../grpc/test/testdata/server1.pem | 16 -
.../src/google.golang.org/grpc/trace.go | 106 +
.../grpc/transport/control.go | 16 +-
.../grpc/transport/http2_client.go | 208 +-
.../grpc/transport/http2_server.go | 121 +-
.../grpc/transport/http_util.go | 83 +-
.../grpc/transport/testdata/ca.pem | 14 -
.../grpc/transport/testdata/server1.key | 15 -
.../grpc/transport/testdata/server1.pem | 16 -
.../grpc/transport/transport.go | 44 +-
.../grpc/transport/transport_test.go | 590 --
199 files changed, 8074 insertions(+), 27607 deletions(-)
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
delete mode 100644 Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
delete mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/net/context/context_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/http2/.gitignore
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/Dockerfile (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/Makefile (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/README (68%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/buffer.go (89%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/errors.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/flow.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/frame.go (99%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/gotrack.go (96%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/.gitignore (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/Makefile (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/README (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/h2demo.go (93%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/launch.go (86%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/rootCA.key (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/rootCA.pem (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/rootCA.srl (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/server.crt (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/h2demo/server.key (100%)
create mode 100644 Godeps/_workspace/src/golang.org/x/net/http2/h2i/README.md
create mode 100644 Godeps/_workspace/src/golang.org/x/net/http2/h2i/h2i.go
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/headermap.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/hpack/encode.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/hpack/hpack.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/hpack/huffman.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/hpack/tables.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/http2.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/pipe.go (100%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/server.go (99%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/transport.go (99%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/write.go (99%)
rename Godeps/_workspace/src/{github.com/bradfitz => golang.org/x/net}/http2/writesched.go (100%)
create mode 100644 Godeps/_workspace/src/golang.org/x/net/internal/timeseries/timeseries.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/trace/events.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/trace/histogram.go
create mode 100644 Godeps/_workspace/src/golang.org/x/net/trace/trace.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/README.md
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/token.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/transport.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
delete mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
rename Godeps/_workspace/src/google.golang.org/grpc/{ => Documentation}/grpc-auth-support.md (57%)
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/Makefile
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/benchmark/benchmark_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/benchmark/client/main.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/benchmark/server/main.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/credentials/oauth/oauth.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/README.md
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/gotutorial.md
rename Godeps/_workspace/src/google.golang.org/grpc/{metadata/metadata_test.go => examples/helloworld/greeter_client/main.go} (59%)
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/greeter_server/main.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
rename Godeps/_workspace/src/{github.com/golang/protobuf/proto/size2_test.go => google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto} (64%)
rename Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/{proto => routeguide}/route_guide.pb.go (98%)
rename Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/{proto => routeguide}/route_guide.proto (99%)
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/ca.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.key
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/testdata/server1.pem
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/grpclog/glogger/glogger.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/health/health.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/ca.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.key
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/client/testdata/server1.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/ca.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.key
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/interop/server/testdata/server1.pem
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/naming/etcd/etcd.go
rename Godeps/_workspace/src/google.golang.org/grpc/{transport/http_util_test.go => naming/naming.go} (54%)
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/picker.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/rpc_util_test.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/test/testdata/ca.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.key
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/test/testdata/server1.pem
create mode 100644 Godeps/_workspace/src/google.golang.org/grpc/trace.go
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/ca.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.key
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/transport/testdata/server1.pem
delete mode 100644 Godeps/_workspace/src/google.golang.org/grpc/transport/transport_test.go
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 2cbc07222c..d59d55ef7f 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/docker/notary",
- "GoVersion": "go1.4.2",
+ "GoVersion": "go1.5.1",
"Packages": [
"./..."
],
@@ -28,10 +28,6 @@
"ImportPath": "github.com/agl/ed25519",
"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
},
- {
- "ImportPath": "github.com/bradfitz/http2",
- "Rev": "97124afb234048ae0c91b8883c59fcd890bf8145"
- },
{
"ImportPath": "github.com/bugsnag/bugsnag-go",
"Comment": "v1.0.4-2-g13fd6b8",
@@ -119,7 +115,7 @@
},
{
"ImportPath": "github.com/golang/protobuf/proto",
- "Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
+ "Rev": "3d2510a4dd961caffa2ae781669c628d82db700a"
},
{
"ImportPath": "github.com/google/gofuzz",
@@ -216,23 +212,23 @@
},
{
"ImportPath": "golang.org/x/net/context",
- "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974"
+ "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
},
{
- "ImportPath": "golang.org/x/oauth2",
- "Rev": "ce5ea7da934b76b1066c527632359e2b8f65db97"
+ "ImportPath": "golang.org/x/net/http2",
+ "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
},
{
- "ImportPath": "google.golang.org/cloud/compute/metadata",
- "Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+ "ImportPath": "golang.org/x/net/internal/timeseries",
+ "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
},
{
- "ImportPath": "google.golang.org/cloud/internal",
- "Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+ "ImportPath": "golang.org/x/net/trace",
+ "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d"
},
{
"ImportPath": "google.golang.org/grpc",
- "Rev": "97f42dd262e97f4632986eddbc74c19fa022ea08"
+ "Rev": "3e7b7e58f491074e9577050058fb95d2351a60b0"
},
{
"ImportPath": "gopkg.in/yaml.v2",
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore b/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
deleted file mode 100644
index b25c15b81f..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*~
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS b/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
deleted file mode 100644
index dab9dbe751..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is like Go's AUTHORS file: it lists Copyright holders.
-# The list of humans who have contributd is in the CONTRIBUTORS file.
-#
-# To contribute to this project, because it will eventually be folded
-# back in to Go itself, you need to submit a CLA:
-#
-# http://golang.org/doc/contribute.html#copyright
-#
-# Then you get added to CONTRIBUTORS and you or your company get added
-# to the AUTHORS file.
-
-Blake Mizerany github=bmizerany
-Daniel Morsing github=DanielMorsing
-Gabriel Aszalos github=gbbr
-Google, Inc.
-Keith Rarick github=kr
-Matthew Keenan github=mattkeenan
-Matt Layher github=mdlayher
-Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS b/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
deleted file mode 100644
index 22e8c8c544..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is like Go's CONTRIBUTORS file: it lists humans.
-# The list of copyright holders (which may be companies) are in the AUTHORS file.
-#
-# To contribute to this project, because it will eventually be folded
-# back in to Go itself, you need to submit a CLA:
-#
-# http://golang.org/doc/contribute.html#copyright
-#
-# Then you get added to CONTRIBUTORS and you or your company get added
-# to the AUTHORS file.
-
-Blake Mizerany github=bmizerany
-Brad Fitzpatrick github=bradfitz
-Daniel Morsing github=DanielMorsing
-Gabriel Aszalos github=gbbr
-Keith Rarick github=kr
-Matthew Keenan github=mattkeenan
-Matt Layher github=mdlayher
-Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING b/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
deleted file mode 100644
index 69aafe4d17..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
+++ /dev/null
@@ -1,5 +0,0 @@
-We only accept contributions from users who have gone through Go's
-contribution process (signed a CLA).
-
-Please acknowledge whether you have (and use the same email) if
-sending a pull request.
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE b/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
deleted file mode 100644
index 2dc6853cae..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright 2014 Google & the Go AUTHORS
-
-Go AUTHORS are:
-See https://code.google.com/p/go/source/browse/AUTHORS
-
-Licensed under the terms of Go itself:
-https://code.google.com/p/go/source/browse/LICENSE
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go
deleted file mode 100644
index 9eb13ff49c..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "io"
- "reflect"
- "testing"
-)
-
-var bufferReadTests = []struct {
- buf buffer
- read, wn int
- werr error
- wp []byte
- wbuf buffer
-}{
- {
- buffer{[]byte{'a', 0}, 0, 1, false, nil},
- 5, 1, nil, []byte{'a'},
- buffer{[]byte{'a', 0}, 1, 1, false, nil},
- },
- {
- buffer{[]byte{'a', 0}, 0, 1, true, io.EOF},
- 5, 1, io.EOF, []byte{'a'},
- buffer{[]byte{'a', 0}, 1, 1, true, io.EOF},
- },
- {
- buffer{[]byte{0, 'a'}, 1, 2, false, nil},
- 5, 1, nil, []byte{'a'},
- buffer{[]byte{0, 'a'}, 2, 2, false, nil},
- },
- {
- buffer{[]byte{0, 'a'}, 1, 2, true, io.EOF},
- 5, 1, io.EOF, []byte{'a'},
- buffer{[]byte{0, 'a'}, 2, 2, true, io.EOF},
- },
- {
- buffer{[]byte{}, 0, 0, false, nil},
- 5, 0, errReadEmpty, []byte{},
- buffer{[]byte{}, 0, 0, false, nil},
- },
- {
- buffer{[]byte{}, 0, 0, true, io.EOF},
- 5, 0, io.EOF, []byte{},
- buffer{[]byte{}, 0, 0, true, io.EOF},
- },
-}
-
-func TestBufferRead(t *testing.T) {
- for i, tt := range bufferReadTests {
- read := make([]byte, tt.read)
- n, err := tt.buf.Read(read)
- if n != tt.wn {
- t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
- continue
- }
- if err != tt.werr {
- t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
- continue
- }
- read = read[:n]
- if !reflect.DeepEqual(read, tt.wp) {
- t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
- }
- if !reflect.DeepEqual(tt.buf, tt.wbuf) {
- t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go
deleted file mode 100644
index 86d52ee658..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import "testing"
-
-func TestErrCodeString(t *testing.T) {
- tests := []struct {
- err ErrCode
- want string
- }{
- {ErrCodeProtocol, "PROTOCOL_ERROR"},
- {0xd, "HTTP_1_1_REQUIRED"},
- {0xf, "unknown error code 0xf"},
- }
- for i, tt := range tests {
- got := tt.err.String()
- if got != tt.want {
- t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go
deleted file mode 100644
index dbb656f8e5..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import "testing"
-
-func TestFlow(t *testing.T) {
- var st flow
- var conn flow
- st.add(3)
- conn.add(2)
-
- if got, want := st.available(), int32(3); got != want {
- t.Errorf("available = %d; want %d", got, want)
- }
- st.setConnFlow(&conn)
- if got, want := st.available(), int32(2); got != want {
- t.Errorf("after parent setup, available = %d; want %d", got, want)
- }
-
- st.take(2)
- if got, want := conn.available(), int32(0); got != want {
- t.Errorf("after taking 2, conn = %d; want %d", got, want)
- }
- if got, want := st.available(), int32(0); got != want {
- t.Errorf("after taking 2, stream = %d; want %d", got, want)
- }
-}
-
-func TestFlowAdd(t *testing.T) {
- var f flow
- if !f.add(1) {
- t.Fatal("failed to add 1")
- }
- if !f.add(-1) {
- t.Fatal("failed to add -1")
- }
- if got, want := f.available(), int32(0); got != want {
- t.Fatalf("size = %d; want %d", got, want)
- }
- if !f.add(1<<31 - 1) {
- t.Fatal("failed to add 2^31-1")
- }
- if got, want := f.available(), int32(1<<31-1); got != want {
- t.Fatalf("size = %d; want %d", got, want)
- }
- if f.add(1) {
- t.Fatal("adding 1 to max shouldn't be allowed")
- }
-
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go
deleted file mode 100644
index 37f1735a5c..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "reflect"
- "strings"
- "testing"
- "unsafe"
-)
-
-func testFramer() (*Framer, *bytes.Buffer) {
- buf := new(bytes.Buffer)
- return NewFramer(buf, buf), buf
-}
-
-func TestFrameSizes(t *testing.T) {
- // Catch people rearranging the FrameHeader fields.
- if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {
- t.Errorf("FrameHeader size = %d; want %d", got, want)
- }
-}
-
-func TestWriteRST(t *testing.T) {
- fr, buf := testFramer()
- var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
- var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4
- fr.WriteRSTStream(streamID, ErrCode(errCode))
- const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- want := &RSTStreamFrame{
- FrameHeader: FrameHeader{
- valid: true,
- Type: 0x3,
- Flags: 0x0,
- Length: 0x4,
- StreamID: 0x1020304,
- },
- ErrCode: 0x7060504,
- }
- if !reflect.DeepEqual(f, want) {
- t.Errorf("parsed back %#v; want %#v", f, want)
- }
-}
-
-func TestWriteData(t *testing.T) {
- fr, buf := testFramer()
- var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
- data := []byte("ABC")
- fr.WriteData(streamID, true, data)
- const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- df, ok := f.(*DataFrame)
- if !ok {
- t.Fatalf("got %T; want *DataFrame", f)
- }
- if !bytes.Equal(df.Data(), data) {
- t.Errorf("got %q; want %q", df.Data(), data)
- }
- if f.Header().Flags&1 == 0 {
- t.Errorf("didn't see END_STREAM flag")
- }
-}
-
-func TestWriteHeaders(t *testing.T) {
- tests := []struct {
- name string
- p HeadersFrameParam
- wantEnc string
- wantFrame *HeadersFrame
- }{
- {
- "basic",
- HeadersFrameParam{
- StreamID: 42,
- BlockFragment: []byte("abc"),
- Priority: PriorityParam{},
- },
- "\x00\x00\x03\x01\x00\x00\x00\x00*abc",
- &HeadersFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: 42,
- Type: FrameHeaders,
- Length: uint32(len("abc")),
- },
- Priority: PriorityParam{},
- headerFragBuf: []byte("abc"),
- },
- },
- {
- "basic + end flags",
- HeadersFrameParam{
- StreamID: 42,
- BlockFragment: []byte("abc"),
- EndStream: true,
- EndHeaders: true,
- Priority: PriorityParam{},
- },
- "\x00\x00\x03\x01\x05\x00\x00\x00*abc",
- &HeadersFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: 42,
- Type: FrameHeaders,
- Flags: FlagHeadersEndStream | FlagHeadersEndHeaders,
- Length: uint32(len("abc")),
- },
- Priority: PriorityParam{},
- headerFragBuf: []byte("abc"),
- },
- },
- {
- "with padding",
- HeadersFrameParam{
- StreamID: 42,
- BlockFragment: []byte("abc"),
- EndStream: true,
- EndHeaders: true,
- PadLength: 5,
- Priority: PriorityParam{},
- },
- "\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00",
- &HeadersFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: 42,
- Type: FrameHeaders,
- Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded,
- Length: uint32(1 + len("abc") + 5), // pad length + contents + padding
- },
- Priority: PriorityParam{},
- headerFragBuf: []byte("abc"),
- },
- },
- {
- "with priority",
- HeadersFrameParam{
- StreamID: 42,
- BlockFragment: []byte("abc"),
- EndStream: true,
- EndHeaders: true,
- PadLength: 2,
- Priority: PriorityParam{
- StreamDep: 15,
- Exclusive: true,
- Weight: 127,
- },
- },
- "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00",
- &HeadersFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: 42,
- Type: FrameHeaders,
- Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
- Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
- },
- Priority: PriorityParam{
- StreamDep: 15,
- Exclusive: true,
- Weight: 127,
- },
- headerFragBuf: []byte("abc"),
- },
- },
- }
- for _, tt := range tests {
- fr, buf := testFramer()
- if err := fr.WriteHeaders(tt.p); err != nil {
- t.Errorf("test %q: %v", tt.name, err)
- continue
- }
- if buf.String() != tt.wantEnc {
- t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
- continue
- }
- if !reflect.DeepEqual(f, tt.wantFrame) {
- t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
- }
- }
-}
-
-func TestWriteContinuation(t *testing.T) {
- const streamID = 42
- tests := []struct {
- name string
- end bool
- frag []byte
-
- wantFrame *ContinuationFrame
- }{
- {
- "not end",
- false,
- []byte("abc"),
- &ContinuationFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: streamID,
- Type: FrameContinuation,
- Length: uint32(len("abc")),
- },
- headerFragBuf: []byte("abc"),
- },
- },
- {
- "end",
- true,
- []byte("def"),
- &ContinuationFrame{
- FrameHeader: FrameHeader{
- valid: true,
- StreamID: streamID,
- Type: FrameContinuation,
- Flags: FlagContinuationEndHeaders,
- Length: uint32(len("def")),
- },
- headerFragBuf: []byte("def"),
- },
- },
- }
- for _, tt := range tests {
- fr, _ := testFramer()
- if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {
- t.Errorf("test %q: %v", tt.name, err)
- continue
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
- continue
- }
- if !reflect.DeepEqual(f, tt.wantFrame) {
- t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
- }
- }
-}
-
-func TestWritePriority(t *testing.T) {
- const streamID = 42
- tests := []struct {
- name string
- priority PriorityParam
- wantFrame *PriorityFrame
- }{
- {
- "not exclusive",
- PriorityParam{
- StreamDep: 2,
- Exclusive: false,
- Weight: 127,
- },
- &PriorityFrame{
- FrameHeader{
- valid: true,
- StreamID: streamID,
- Type: FramePriority,
- Length: 5,
- },
- PriorityParam{
- StreamDep: 2,
- Exclusive: false,
- Weight: 127,
- },
- },
- },
-
- {
- "exclusive",
- PriorityParam{
- StreamDep: 3,
- Exclusive: true,
- Weight: 77,
- },
- &PriorityFrame{
- FrameHeader{
- valid: true,
- StreamID: streamID,
- Type: FramePriority,
- Length: 5,
- },
- PriorityParam{
- StreamDep: 3,
- Exclusive: true,
- Weight: 77,
- },
- },
- },
- }
- for _, tt := range tests {
- fr, _ := testFramer()
- if err := fr.WritePriority(streamID, tt.priority); err != nil {
- t.Errorf("test %q: %v", tt.name, err)
- continue
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
- continue
- }
- if !reflect.DeepEqual(f, tt.wantFrame) {
- t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
- }
- }
-}
-
-func TestWriteSettings(t *testing.T) {
- fr, buf := testFramer()
- settings := []Setting{{1, 2}, {3, 4}}
- fr.WriteSettings(settings...)
- const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- t.Fatalf("Got a %T; want a SettingsFrame", f)
- }
- var got []Setting
- sf.ForeachSetting(func(s Setting) error {
- got = append(got, s)
- valBack, ok := sf.Value(s.ID)
- if !ok || valBack != s.Val {
- t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val)
- }
- return nil
- })
- if !reflect.DeepEqual(settings, got) {
- t.Errorf("Read settings %+v != written settings %+v", got, settings)
- }
-}
-
-func TestWriteSettingsAck(t *testing.T) {
- fr, buf := testFramer()
- fr.WriteSettingsAck()
- const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
-}
-
-func TestWriteWindowUpdate(t *testing.T) {
- fr, buf := testFramer()
- const streamID = 1<<24 + 2<<16 + 3<<8 + 4
- const incr = 7<<24 + 6<<16 + 5<<8 + 4
- if err := fr.WriteWindowUpdate(streamID, incr); err != nil {
- t.Fatal(err)
- }
- const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- want := &WindowUpdateFrame{
- FrameHeader: FrameHeader{
- valid: true,
- Type: 0x8,
- Flags: 0x0,
- Length: 0x4,
- StreamID: 0x1020304,
- },
- Increment: 0x7060504,
- }
- if !reflect.DeepEqual(f, want) {
- t.Errorf("parsed back %#v; want %#v", f, want)
- }
-}
-
-func TestWritePing(t *testing.T) { testWritePing(t, false) }
-func TestWritePingAck(t *testing.T) { testWritePing(t, true) }
-
-func testWritePing(t *testing.T, ack bool) {
- fr, buf := testFramer()
- if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
- t.Fatal(err)
- }
- var wantFlags Flags
- if ack {
- wantFlags = FlagPingAck
- }
- var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
-
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- want := &PingFrame{
- FrameHeader: FrameHeader{
- valid: true,
- Type: 0x6,
- Flags: wantFlags,
- Length: 0x8,
- StreamID: 0,
- },
- Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
- }
- if !reflect.DeepEqual(f, want) {
- t.Errorf("parsed back %#v; want %#v", f, want)
- }
-}
-
-func TestReadFrameHeader(t *testing.T) {
- tests := []struct {
- in string
- want FrameHeader
- }{
- {in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}},
- {in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{
- Length: 66051, Type: 4, Flags: 5, StreamID: 101124105,
- }},
- // Ignore high bit:
- {in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{
- Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
- {in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{
- Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
- }
- for i, tt := range tests {
- got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))
- if err != nil {
- t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err)
- continue
- }
- tt.want.valid = true
- if got != tt.want {
- t.Errorf("%d. readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want)
- }
- }
-}
-
-func TestReadWriteFrameHeader(t *testing.T) {
- tests := []struct {
- len uint32
- typ FrameType
- flags Flags
- streamID uint32
- }{
- {len: 0, typ: 255, flags: 1, streamID: 0},
- {len: 0, typ: 255, flags: 1, streamID: 1},
- {len: 0, typ: 255, flags: 1, streamID: 255},
- {len: 0, typ: 255, flags: 1, streamID: 256},
- {len: 0, typ: 255, flags: 1, streamID: 65535},
- {len: 0, typ: 255, flags: 1, streamID: 65536},
-
- {len: 0, typ: 1, flags: 255, streamID: 1},
- {len: 255, typ: 1, flags: 255, streamID: 1},
- {len: 256, typ: 1, flags: 255, streamID: 1},
- {len: 65535, typ: 1, flags: 255, streamID: 1},
- {len: 65536, typ: 1, flags: 255, streamID: 1},
- {len: 16777215, typ: 1, flags: 255, streamID: 1},
- }
- for _, tt := range tests {
- fr, buf := testFramer()
- fr.startWrite(tt.typ, tt.flags, tt.streamID)
- fr.writeBytes(make([]byte, tt.len))
- fr.endWrite()
- fh, err := ReadFrameHeader(buf)
- if err != nil {
- t.Errorf("ReadFrameHeader(%+v) = %v", tt, err)
- continue
- }
- if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {
- t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh)
- }
- }
-
-}
-
-func TestWriteTooLargeFrame(t *testing.T) {
- fr, _ := testFramer()
- fr.startWrite(0, 1, 1)
- fr.writeBytes(make([]byte, 1<<24))
- err := fr.endWrite()
- if err != ErrFrameTooLarge {
- t.Errorf("endWrite = %v; want errFrameTooLarge", err)
- }
-}
-
-func TestWriteGoAway(t *testing.T) {
- const debug = "foo"
- fr, buf := testFramer()
- if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {
- t.Fatal(err)
- }
- const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- want := &GoAwayFrame{
- FrameHeader: FrameHeader{
- valid: true,
- Type: 0x7,
- Flags: 0,
- Length: uint32(4 + 4 + len(debug)),
- StreamID: 0,
- },
- LastStreamID: 0x01020304,
- ErrCode: 0x05060708,
- debugData: []byte(debug),
- }
- if !reflect.DeepEqual(f, want) {
- t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
- }
- if got := string(f.(*GoAwayFrame).DebugData()); got != debug {
- t.Errorf("debug data = %q; want %q", got, debug)
- }
-}
-
-func TestWritePushPromise(t *testing.T) {
- pp := PushPromiseParam{
- StreamID: 42,
- PromiseID: 42,
- BlockFragment: []byte("abc"),
- }
- fr, buf := testFramer()
- if err := fr.WritePushPromise(pp); err != nil {
- t.Fatal(err)
- }
- const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc"
- if buf.String() != wantEnc {
- t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
- }
- f, err := fr.ReadFrame()
- if err != nil {
- t.Fatal(err)
- }
- _, ok := f.(*PushPromiseFrame)
- if !ok {
- t.Fatalf("got %T; want *PushPromiseFrame", f)
- }
- want := &PushPromiseFrame{
- FrameHeader: FrameHeader{
- valid: true,
- Type: 0x5,
- Flags: 0x0,
- Length: 0x7,
- StreamID: 42,
- },
- PromiseID: 42,
- headerFragBuf: []byte("abc"),
- }
- if !reflect.DeepEqual(f, want) {
- t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
deleted file mode 100644
index 3ff4957e97..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "fmt"
- "strings"
- "testing"
-)
-
-func TestGoroutineLock(t *testing.T) {
- DebugGoroutines = true
- g := newGoroutineLock()
- g.check()
-
- sawPanic := make(chan interface{})
- go func() {
- defer func() { sawPanic <- recover() }()
- g.check() // should panic
- }()
- e := <-sawPanic
- if e == nil {
- t.Fatal("did not see panic from check in other goroutine")
- }
- if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
- t.Errorf("expected on see panic about running on the wrong goroutine; got %v", e)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go
deleted file mode 100644
index dce66c90f5..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package hpack
-
-import (
- "bytes"
- "encoding/hex"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestEncoderTableSizeUpdate(t *testing.T) {
- tests := []struct {
- size1, size2 uint32
- wantHex string
- }{
- // Should emit 2 table size updates (2048 and 4096)
- {2048, 4096, "3fe10f 3fe11f 82"},
-
- // Should emit 1 table size update (2048)
- {16384, 2048, "3fe10f 82"},
- }
- for _, tt := range tests {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- e.SetMaxDynamicTableSize(tt.size1)
- e.SetMaxDynamicTableSize(tt.size2)
- if err := e.WriteField(pair(":method", "GET")); err != nil {
- t.Fatal(err)
- }
- want := removeSpace(tt.wantHex)
- if got := hex.EncodeToString(buf.Bytes()); got != want {
- t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
- }
- }
-}
-
-func TestEncoderWriteField(t *testing.T) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- var got []HeaderField
- d := NewDecoder(4<<10, func(f HeaderField) {
- got = append(got, f)
- })
-
- tests := []struct {
- hdrs []HeaderField
- }{
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- }},
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- }},
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- }},
- }
- for i, tt := range tests {
- buf.Reset()
- got = got[:0]
- for _, hf := range tt.hdrs {
- if err := e.WriteField(hf); err != nil {
- t.Fatal(err)
- }
- }
- _, err := d.Write(buf.Bytes())
- if err != nil {
- t.Errorf("%d. Decoder Write = %v", i, err)
- }
- if !reflect.DeepEqual(got, tt.hdrs) {
- t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
- }
- }
-}
-
-func TestEncoderSearchTable(t *testing.T) {
- e := NewEncoder(nil)
-
- e.dynTab.add(pair("foo", "bar"))
- e.dynTab.add(pair("blake", "miz"))
- e.dynTab.add(pair(":method", "GET"))
-
- tests := []struct {
- hf HeaderField
- wantI uint64
- wantMatch bool
- }{
- // Name and Value match
- {pair("foo", "bar"), uint64(len(staticTable) + 3), true},
- {pair("blake", "miz"), uint64(len(staticTable) + 2), true},
- {pair(":method", "GET"), 2, true},
-
- // Only name match because Sensitive == true
- {HeaderField{":method", "GET", true}, 2, false},
-
- // Only Name matches
- {pair("foo", "..."), uint64(len(staticTable) + 3), false},
- {pair("blake", "..."), uint64(len(staticTable) + 2), false},
- {pair(":method", "..."), 2, false},
-
- // None match
- {pair("foo-", "bar"), 0, false},
- }
- for _, tt := range tests {
- if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
- t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
- }
- }
-}
-
-func TestAppendVarInt(t *testing.T) {
- tests := []struct {
- n byte
- i uint64
- want []byte
- }{
- // Fits in a byte:
- {1, 0, []byte{0}},
- {2, 2, []byte{2}},
- {3, 6, []byte{6}},
- {4, 14, []byte{14}},
- {5, 30, []byte{30}},
- {6, 62, []byte{62}},
- {7, 126, []byte{126}},
- {8, 254, []byte{254}},
-
- // Multiple bytes:
- {5, 1337, []byte{31, 154, 10}},
- }
- for _, tt := range tests {
- got := appendVarInt(nil, tt.n, tt.i)
- if !bytes.Equal(got, tt.want) {
- t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
- }
- }
-}
-
-func TestAppendHpackString(t *testing.T) {
- tests := []struct {
- s, wantHex string
- }{
- // Huffman encoded
- {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
-
- // Not Huffman encoded
- {"a", "01 61"},
-
- // zero length
- {"", "00"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendHpackString(nil, tt.s)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
- }
- }
-}
-
-func TestAppendIndexed(t *testing.T) {
- tests := []struct {
- i uint64
- wantHex string
- }{
- // 1 byte
- {1, "81"},
- {126, "fe"},
-
- // 2 bytes
- {127, "ff00"},
- {128, "ff01"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendIndexed(nil, tt.i)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
- }
- }
-}
-
-func TestAppendNewName(t *testing.T) {
- tests := []struct {
- f HeaderField
- indexing bool
- wantHex string
- }{
- // Incremental indexing
- {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
-
- // Without indexing
- {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
-
- // Never indexed
- {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
- {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendNewName(nil, tt.f, tt.indexing)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
- }
- }
-}
-
-func TestAppendIndexedName(t *testing.T) {
- tests := []struct {
- f HeaderField
- i uint64
- indexing bool
- wantHex string
- }{
- // Incremental indexing
- {HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
-
- // Without indexing
- {HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
-
- // Never indexed
- {HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
- {HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
- }
- }
-}
-
-func TestAppendTableSize(t *testing.T) {
- tests := []struct {
- i uint32
- wantHex string
- }{
- // Fits into 1 byte
- {30, "3e"},
-
- // Extra byte
- {31, "3f00"},
- {32, "3f01"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendTableSize(nil, tt.i)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
- }
- }
-}
-
-func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- tests := []struct {
- v uint32
- wantUpdate bool
- wantMinSize uint32
- wantMaxSize uint32
- }{
- // Set new table size to 2048
- {2048, true, 2048, 2048},
-
- // Set new table size to 16384, but still limited to
- // 4096
- {16384, true, 2048, 4096},
- }
- for _, tt := range tests {
- e.SetMaxDynamicTableSize(tt.v)
- if got := e.tableSizeUpdate; tt.wantUpdate != got {
- t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
- }
- if got := e.minSize; tt.wantMinSize != got {
- t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
- }
- if got := e.dynTab.maxSize; tt.wantMaxSize != got {
- t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
- }
- }
-}
-
-func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
- e := NewEncoder(nil)
- // 4095 < initialHeaderTableSize means maxSize is truncated to
- // 4095.
- e.SetMaxDynamicTableSizeLimit(4095)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- if got, want := e.maxSizeLimit, uint32(4095); got != want {
- t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
- }
- if got, want := e.tableSizeUpdate, true; got != want {
- t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
- }
- // maxSize will be truncated to maxSizeLimit
- e.SetMaxDynamicTableSize(16384)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- // 8192 > current maxSizeLimit, so maxSize does not change.
- e.SetMaxDynamicTableSizeLimit(8192)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- if got, want := e.maxSizeLimit, uint32(8192); got != want {
- t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
- }
-}
-
-func removeSpace(s string) string {
- return strings.Replace(s, " ", "", -1)
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
deleted file mode 100644
index 08bee414f3..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package hpack
-
-import (
- "bufio"
- "bytes"
- "encoding/hex"
- "fmt"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "testing"
-)
-
-func TestStaticTable(t *testing.T) {
- fromSpec := `
- +-------+-----------------------------+---------------+
- | 1 | :authority | |
- | 2 | :method | GET |
- | 3 | :method | POST |
- | 4 | :path | / |
- | 5 | :path | /index.html |
- | 6 | :scheme | http |
- | 7 | :scheme | https |
- | 8 | :status | 200 |
- | 9 | :status | 204 |
- | 10 | :status | 206 |
- | 11 | :status | 304 |
- | 12 | :status | 400 |
- | 13 | :status | 404 |
- | 14 | :status | 500 |
- | 15 | accept-charset | |
- | 16 | accept-encoding | gzip, deflate |
- | 17 | accept-language | |
- | 18 | accept-ranges | |
- | 19 | accept | |
- | 20 | access-control-allow-origin | |
- | 21 | age | |
- | 22 | allow | |
- | 23 | authorization | |
- | 24 | cache-control | |
- | 25 | content-disposition | |
- | 26 | content-encoding | |
- | 27 | content-language | |
- | 28 | content-length | |
- | 29 | content-location | |
- | 30 | content-range | |
- | 31 | content-type | |
- | 32 | cookie | |
- | 33 | date | |
- | 34 | etag | |
- | 35 | expect | |
- | 36 | expires | |
- | 37 | from | |
- | 38 | host | |
- | 39 | if-match | |
- | 40 | if-modified-since | |
- | 41 | if-none-match | |
- | 42 | if-range | |
- | 43 | if-unmodified-since | |
- | 44 | last-modified | |
- | 45 | link | |
- | 46 | location | |
- | 47 | max-forwards | |
- | 48 | proxy-authenticate | |
- | 49 | proxy-authorization | |
- | 50 | range | |
- | 51 | referer | |
- | 52 | refresh | |
- | 53 | retry-after | |
- | 54 | server | |
- | 55 | set-cookie | |
- | 56 | strict-transport-security | |
- | 57 | transfer-encoding | |
- | 58 | user-agent | |
- | 59 | vary | |
- | 60 | via | |
- | 61 | www-authenticate | |
- +-------+-----------------------------+---------------+
-`
- bs := bufio.NewScanner(strings.NewReader(fromSpec))
- re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
- for bs.Scan() {
- l := bs.Text()
- if !strings.Contains(l, "|") {
- continue
- }
- m := re.FindStringSubmatch(l)
- if m == nil {
- continue
- }
- i, err := strconv.Atoi(m[1])
- if err != nil {
- t.Errorf("Bogus integer on line %q", l)
- continue
- }
- if i < 1 || i > len(staticTable) {
- t.Errorf("Bogus index %d on line %q", i, l)
- continue
- }
- if got, want := staticTable[i-1].Name, m[2]; got != want {
- t.Errorf("header index %d name = %q; want %q", i, got, want)
- }
- if got, want := staticTable[i-1].Value, m[3]; got != want {
- t.Errorf("header index %d value = %q; want %q", i, got, want)
- }
- }
- if err := bs.Err(); err != nil {
- t.Error(err)
- }
-}
-
-func (d *Decoder) mustAt(idx int) HeaderField {
- if hf, ok := d.at(uint64(idx)); !ok {
- panic(fmt.Sprintf("bogus index %d", idx))
- } else {
- return hf
- }
-}
-
-func TestDynamicTableAt(t *testing.T) {
- d := NewDecoder(4096, nil)
- at := d.mustAt
- if got, want := at(2), (pair(":method", "GET")); got != want {
- t.Errorf("at(2) = %v; want %v", got, want)
- }
- d.dynTab.add(pair("foo", "bar"))
- d.dynTab.add(pair("blake", "miz"))
- if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
- t.Errorf("at(dyn 1) = %v; want %v", got, want)
- }
- if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
- t.Errorf("at(dyn 2) = %v; want %v", got, want)
- }
- if got, want := at(3), (pair(":method", "POST")); got != want {
- t.Errorf("at(3) = %v; want %v", got, want)
- }
-}
-
-func TestDynamicTableSearch(t *testing.T) {
- dt := dynamicTable{}
- dt.setMaxSize(4096)
-
- dt.add(pair("foo", "bar"))
- dt.add(pair("blake", "miz"))
- dt.add(pair(":method", "GET"))
-
- tests := []struct {
- hf HeaderField
- wantI uint64
- wantMatch bool
- }{
- // Name and Value match
- {pair("foo", "bar"), 3, true},
- {pair(":method", "GET"), 1, true},
-
- // Only name match because of Sensitive == true
- {HeaderField{"blake", "miz", true}, 2, false},
-
- // Only Name matches
- {pair("foo", "..."), 3, false},
- {pair("blake", "..."), 2, false},
- {pair(":method", "..."), 1, false},
-
- // None match
- {pair("foo-", "bar"), 0, false},
- }
- for _, tt := range tests {
- if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
- t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
- }
- }
-}
-
-func TestDynamicTableSizeEvict(t *testing.T) {
- d := NewDecoder(4096, nil)
- if want := uint32(0); d.dynTab.size != want {
- t.Fatalf("size = %d; want %d", d.dynTab.size, want)
- }
- add := d.dynTab.add
- add(pair("blake", "eats pizza"))
- if want := uint32(15 + 32); d.dynTab.size != want {
- t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
- }
- add(pair("foo", "bar"))
- if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
- t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
- }
- d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
- if want := uint32(6 + 32); d.dynTab.size != want {
- t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
- }
- if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
- t.Errorf("at(dyn 1) = %v; want %v", got, want)
- }
- add(pair("long", strings.Repeat("x", 500)))
- if want := uint32(0); d.dynTab.size != want {
- t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
- }
-}
-
-func TestDecoderDecode(t *testing.T) {
- tests := []struct {
- name string
- in []byte
- want []HeaderField
- wantDynTab []HeaderField // newest entry first
- }{
- // C.2.1 Literal Header Field with Indexing
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
- {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
- []HeaderField{pair("custom-key", "custom-header")},
- []HeaderField{pair("custom-key", "custom-header")},
- },
-
- // C.2.2 Literal Header Field without Indexing
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
- {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
- []HeaderField{pair(":path", "/sample/path")},
- []HeaderField{}},
-
- // C.2.3 Literal Header Field never Indexed
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
- {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
- []HeaderField{{"password", "secret", true}},
- []HeaderField{}},
-
- // C.2.4 Indexed Header Field
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
- {"C.2.4", []byte("\x82"),
- []HeaderField{pair(":method", "GET")},
- []HeaderField{}},
- }
- for _, tt := range tests {
- d := NewDecoder(4096, nil)
- hf, err := d.DecodeFull(tt.in)
- if err != nil {
- t.Errorf("%s: %v", tt.name, err)
- continue
- }
- if !reflect.DeepEqual(hf, tt.want) {
- t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
- }
- gotDynTab := d.dynTab.reverseCopy()
- if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
- t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
- }
- }
-}
-
-func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
- hf = make([]HeaderField, len(dt.ents))
- for i := range hf {
- hf[i] = dt.ents[len(dt.ents)-1-i]
- }
- return
-}
-
-type encAndWant struct {
- enc []byte
- want []HeaderField
- wantDynTab []HeaderField
- wantDynSize uint32
-}
-
-// C.3 Request Examples without Huffman Coding
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
-func TestDecodeC3_NoHuffman(t *testing.T) {
- testDecodeSeries(t, 4096, []encAndWant{
- {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- },
- []HeaderField{
- pair(":authority", "www.example.com"),
- },
- 57,
- },
- {dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- },
- []HeaderField{
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 110,
- },
- {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- },
- []HeaderField{
- pair("custom-key", "custom-value"),
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 164,
- },
- })
-}
-
-// C.4 Request Examples with Huffman Coding
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
-func TestDecodeC4_Huffman(t *testing.T) {
- testDecodeSeries(t, 4096, []encAndWant{
- {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- },
- []HeaderField{
- pair(":authority", "www.example.com"),
- },
- 57,
- },
- {dehex("8286 84be 5886 a8eb 1064 9cbf"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- },
- []HeaderField{
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 110,
- },
- {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- },
- []HeaderField{
- pair("custom-key", "custom-value"),
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 164,
- },
- })
-}
-
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
-// "This section shows several consecutive header lists, corresponding
-// to HTTP responses, on the same connection. The HTTP/2 setting
-// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
-// octets, causing some evictions to occur."
-func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
- testDecodeSeries(t, 256, []encAndWant{
- {dehex(`
-4803 3330 3258 0770 7269 7661 7465 611d
-4d6f 6e2c 2032 3120 4f63 7420 3230 3133
-2032 303a 3133 3a32 3120 474d 546e 1768
-7474 7073 3a2f 2f77 7777 2e65 7861 6d70
-6c65 2e63 6f6d
-`),
- []HeaderField{
- pair(":status", "302"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- pair(":status", "302"),
- },
- 222,
- },
- {dehex("4803 3330 37c1 c0bf"),
- []HeaderField{
- pair(":status", "307"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair(":status", "307"),
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- },
- 222,
- },
- {dehex(`
-88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
-3230 3133 2032 303a 3133 3a32 3220 474d
-54c0 5a04 677a 6970 7738 666f 6f3d 4153
-444a 4b48 514b 425a 584f 5157 454f 5049
-5541 5851 5745 4f49 553b 206d 6178 2d61
-6765 3d33 3630 303b 2076 6572 7369 6f6e
-3d31
-`),
- []HeaderField{
- pair(":status", "200"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- pair("location", "https://www.example.com"),
- pair("content-encoding", "gzip"),
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- },
- []HeaderField{
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- pair("content-encoding", "gzip"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- },
- 215,
- },
- })
-}
-
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
-// "This section shows the same examples as the previous section, but
-// using Huffman encoding for the literal values. The HTTP/2 setting
-// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
-// octets, causing some evictions to occur. The eviction mechanism
-// uses the length of the decoded literal values, so the same
-// evictions occurs as in the previous section."
-func TestDecodeC6_ResponsesHuffman(t *testing.T) {
- testDecodeSeries(t, 256, []encAndWant{
- {dehex(`
-4882 6402 5885 aec3 771a 4b61 96d0 7abe
-9410 54d4 44a8 2005 9504 0b81 66e0 82a6
-2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
-e9ae 82ae 43d3
-`),
- []HeaderField{
- pair(":status", "302"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- pair(":status", "302"),
- },
- 222,
- },
- {dehex("4883 640e ffc1 c0bf"),
- []HeaderField{
- pair(":status", "307"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair(":status", "307"),
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- },
- 222,
- },
- {dehex(`
-88c1 6196 d07a be94 1054 d444 a820 0595
-040b 8166 e084 a62d 1bff c05a 839b d9ab
-77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
-3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
-9587 3160 65c0 03ed 4ee5 b106 3d50 07
-`),
- []HeaderField{
- pair(":status", "200"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- pair("location", "https://www.example.com"),
- pair("content-encoding", "gzip"),
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- },
- []HeaderField{
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- pair("content-encoding", "gzip"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- },
- 215,
- },
- })
-}
-
-func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
- d := NewDecoder(size, nil)
- for i, step := range steps {
- hf, err := d.DecodeFull(step.enc)
- if err != nil {
- t.Fatalf("Error at step index %d: %v", i, err)
- }
- if !reflect.DeepEqual(hf, step.want) {
- t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
- }
- gotDynTab := d.dynTab.reverseCopy()
- if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
- t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
- }
- if d.dynTab.size != step.wantDynSize {
- t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
- }
- }
-}
-
-func TestHuffmanDecode(t *testing.T) {
- tests := []struct {
- inHex, want string
- }{
- {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
- {"a8eb 1064 9cbf", "no-cache"},
- {"25a8 49e9 5ba9 7d7f", "custom-key"},
- {"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
- {"6402", "302"},
- {"aec3 771a 4b", "private"},
- {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
- {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
- {"9bd9 ab", "gzip"},
- {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
- "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
- }
- for i, tt := range tests {
- var buf bytes.Buffer
- in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
- if err != nil {
- t.Errorf("%d. hex input error: %v", i, err)
- continue
- }
- if _, err := HuffmanDecode(&buf, in); err != nil {
- t.Errorf("%d. decode error: %v", i, err)
- continue
- }
- if got := buf.String(); tt.want != got {
- t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
- }
- }
-}
-
-func TestAppendHuffmanString(t *testing.T) {
- tests := []struct {
- in, want string
- }{
- {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
- {"no-cache", "a8eb 1064 9cbf"},
- {"custom-key", "25a8 49e9 5ba9 7d7f"},
- {"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
- {"302", "6402"},
- {"private", "aec3 771a 4b"},
- {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
- {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
- {"gzip", "9bd9 ab"},
- {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
- "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
- }
- for i, tt := range tests {
- buf := []byte{}
- want := strings.Replace(tt.want, " ", "", -1)
- buf = AppendHuffmanString(buf, tt.in)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("%d. encode = %q; want %q", i, got, want)
- }
- }
-}
-
-func TestReadVarInt(t *testing.T) {
- type res struct {
- i uint64
- consumed int
- err error
- }
- tests := []struct {
- n byte
- p []byte
- want res
- }{
- // Fits in a byte:
- {1, []byte{0}, res{0, 1, nil}},
- {2, []byte{2}, res{2, 1, nil}},
- {3, []byte{6}, res{6, 1, nil}},
- {4, []byte{14}, res{14, 1, nil}},
- {5, []byte{30}, res{30, 1, nil}},
- {6, []byte{62}, res{62, 1, nil}},
- {7, []byte{126}, res{126, 1, nil}},
- {8, []byte{254}, res{254, 1, nil}},
-
- // Doesn't fit in a byte:
- {1, []byte{1}, res{0, 0, errNeedMore}},
- {2, []byte{3}, res{0, 0, errNeedMore}},
- {3, []byte{7}, res{0, 0, errNeedMore}},
- {4, []byte{15}, res{0, 0, errNeedMore}},
- {5, []byte{31}, res{0, 0, errNeedMore}},
- {6, []byte{63}, res{0, 0, errNeedMore}},
- {7, []byte{127}, res{0, 0, errNeedMore}},
- {8, []byte{255}, res{0, 0, errNeedMore}},
-
- // Ignoring top bits:
- {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
- {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
- {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
-
- // Extra byte:
- {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
-
- // Short a byte:
- {5, []byte{191, 154}, res{0, 0, errNeedMore}},
-
- // integer overflow:
- {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
- }
- for _, tt := range tests {
- i, remain, err := readVarInt(tt.n, tt.p)
- consumed := len(tt.p) - len(remain)
- got := res{i, consumed, err}
- if got != tt.want {
- t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
- }
- }
-}
-
-func dehex(s string) []byte {
- s = strings.Replace(s, " ", "", -1)
- s = strings.Replace(s, "\n", "", -1)
- b, err := hex.DecodeString(s)
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go
deleted file mode 100644
index 6a1b7e8361..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "net/http"
- "os/exec"
- "strconv"
- "strings"
- "testing"
-
- "github.com/bradfitz/http2/hpack"
-)
-
-var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
-
-func condSkipFailingTest(t *testing.T) {
- if !*knownFailing {
- t.Skip("Skipping known-failing test without --known_failing")
- }
-}
-
-func init() {
- DebugGoroutines = true
- flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
-}
-
-func TestSettingString(t *testing.T) {
- tests := []struct {
- s Setting
- want string
- }{
- {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
- {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
- }
- for i, tt := range tests {
- got := fmt.Sprint(tt.s)
- if got != tt.want {
- t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
- }
- }
-}
-
-type twriter struct {
- t testing.TB
- st *serverTester // optional
-}
-
-func (w twriter) Write(p []byte) (n int, err error) {
- if w.st != nil {
- ps := string(p)
- for _, phrase := range w.st.logFilter {
- if strings.Contains(ps, phrase) {
- return len(p), nil // no logging
- }
- }
- }
- w.t.Logf("%s", p)
- return len(p), nil
-}
-
-// like encodeHeader, but don't add implicit psuedo headers.
-func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- headers = headers[2:]
- if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
- t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
- }
- }
- return buf.Bytes()
-}
-
-// Verify that curl has http2.
-func requireCurl(t *testing.T) {
- out, err := dockerLogs(curl(t, "--version"))
- if err != nil {
- t.Skipf("failed to determine curl features; skipping test")
- }
- if !strings.Contains(string(out), "HTTP2") {
- t.Skip("curl doesn't support HTTP2; skipping test")
- }
-}
-
-func curl(t *testing.T, args ...string) (container string) {
- out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).CombinedOutput()
- if err != nil {
- t.Skipf("Failed to run curl in docker: %v, %s", err, out)
- }
- return strings.TrimSpace(string(out))
-}
-
-type puppetCommand struct {
- fn func(w http.ResponseWriter, r *http.Request)
- done chan<- bool
-}
-
-type handlerPuppet struct {
- ch chan puppetCommand
-}
-
-func newHandlerPuppet() *handlerPuppet {
- return &handlerPuppet{
- ch: make(chan puppetCommand),
- }
-}
-
-func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
- for cmd := range p.ch {
- cmd.fn(w, r)
- cmd.done <- true
- }
-}
-
-func (p *handlerPuppet) done() { close(p.ch) }
-func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
- done := make(chan bool)
- p.ch <- puppetCommand{fn, done}
- <-done
-}
-func dockerLogs(container string) ([]byte, error) {
- out, err := exec.Command("docker", "wait", container).CombinedOutput()
- if err != nil {
- return out, err
- }
- exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
- if err != nil {
- return out, errors.New("unexpected exit status from docker wait")
- }
- out, err = exec.Command("docker", "logs", container).CombinedOutput()
- exec.Command("docker", "rm", container).Run()
- if err == nil && exitStatus != 0 {
- err = fmt.Errorf("exit status %d: %s", exitStatus, out)
- }
- return out, err
-}
-
-func kill(container string) {
- exec.Command("docker", "kill", container).Run()
- exec.Command("docker", "rm", container).Run()
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go
deleted file mode 100644
index 10c4c327c4..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "errors"
- "testing"
-)
-
-func TestPipeClose(t *testing.T) {
- var p pipe
- p.c.L = &p.m
- a := errors.New("a")
- b := errors.New("b")
- p.Close(a)
- p.Close(b)
- _, err := p.Read(make([]byte, 1))
- if err != a {
- t.Errorf("err = %v want %v", err, a)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go
deleted file mode 100644
index d9648fd328..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "testing"
-)
-
-func TestPriority(t *testing.T) {
- // A -> B
- // move A's parent to B
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: a,
- weight: 16,
- }
- streams[2] = b
- adjustStreamPriority(streams, 1, PriorityParam{
- Weight: 20,
- StreamDep: 2,
- })
- if a.parent != b {
- t.Errorf("Expected A's parent to be B")
- }
- if a.weight != 20 {
- t.Errorf("Expected A's weight to be 20; got %d", a.weight)
- }
- if b.parent != nil {
- t.Errorf("Expected B to have no parent")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
-}
-
-func TestPriorityExclusiveZero(t *testing.T) {
- // A B and C are all children of the 0 stream.
- // Exclusive reprioritization to any of the streams
- // should bring the rest of the streams under the
- // reprioritized stream
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: nil,
- weight: 16,
- }
- streams[2] = b
- c := &stream{
- parent: nil,
- weight: 16,
- }
- streams[3] = c
- adjustStreamPriority(streams, 3, PriorityParam{
- Weight: 20,
- StreamDep: 0,
- Exclusive: true,
- })
- if a.parent != c {
- t.Errorf("Expected A's parent to be C")
- }
- if a.weight != 16 {
- t.Errorf("Expected A's weight to be 16; got %d", a.weight)
- }
- if b.parent != c {
- t.Errorf("Expected B's parent to be C")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
- if c.parent != nil {
- t.Errorf("Expected C to have no parent")
- }
- if c.weight != 20 {
- t.Errorf("Expected C's weight to be 20; got %d", b.weight)
- }
-}
-
-func TestPriorityOwnParent(t *testing.T) {
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: a,
- weight: 16,
- }
- streams[2] = b
- adjustStreamPriority(streams, 1, PriorityParam{
- Weight: 20,
- StreamDep: 1,
- })
- if a.parent != nil {
- t.Errorf("Expected A's parent to be nil")
- }
- if a.weight != 20 {
- t.Errorf("Expected A's weight to be 20; got %d", a.weight)
- }
- if b.parent != a {
- t.Errorf("Expected B's parent to be A")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
-
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
deleted file mode 100644
index 1133223b85..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
+++ /dev/null
@@ -1,2252 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "bytes"
- "crypto/tls"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httptest"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/bradfitz/http2/hpack"
-)
-
-var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
-
-type serverTester struct {
- cc net.Conn // client conn
- t testing.TB
- ts *httptest.Server
- fr *Framer
- logBuf *bytes.Buffer
- logFilter []string // substrings to filter out
- scMu sync.Mutex // guards sc
- sc *serverConn
-
- // writing headers:
- headerBuf bytes.Buffer
- hpackEnc *hpack.Encoder
-
- // reading frames:
- frc chan Frame
- frErrc chan error
- readTimer *time.Timer
-}
-
-func init() {
- testHookOnPanicMu = new(sync.Mutex)
-}
-
-func resetHooks() {
- testHookOnPanicMu.Lock()
- testHookOnPanic = nil
- testHookOnPanicMu.Unlock()
-}
-
-type serverTesterOpt string
-
-var optOnlyServer = serverTesterOpt("only_server")
-
-func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
- resetHooks()
-
- logBuf := new(bytes.Buffer)
- ts := httptest.NewUnstartedServer(handler)
-
- tlsConfig := &tls.Config{
- InsecureSkipVerify: true,
- // The h2-14 is temporary, until curl is updated. (as used by unit tests
- // in Docker)
- NextProtos: []string{NextProtoTLS, "h2-14"},
- }
-
- onlyServer := false
- for _, opt := range opts {
- switch v := opt.(type) {
- case func(*tls.Config):
- v(tlsConfig)
- case func(*httptest.Server):
- v(ts)
- case serverTesterOpt:
- onlyServer = (v == optOnlyServer)
- default:
- t.Fatalf("unknown newServerTester option type %T", v)
- }
- }
-
- ConfigureServer(ts.Config, &Server{})
-
- st := &serverTester{
- t: t,
- ts: ts,
- logBuf: logBuf,
- frc: make(chan Frame, 1),
- frErrc: make(chan error, 1),
- }
- st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
-
- var stderrv io.Writer = ioutil.Discard
- if *stderrVerbose {
- stderrv = os.Stderr
- }
-
- ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv, twriter{t: t, st: st}, logBuf), "", log.LstdFlags)
- ts.StartTLS()
-
- if VerboseLogs {
- t.Logf("Running test server at: %s", ts.URL)
- }
- testHookGetServerConn = func(v *serverConn) {
- st.scMu.Lock()
- defer st.scMu.Unlock()
- st.sc = v
- st.sc.testHookCh = make(chan func())
- }
- log.SetOutput(io.MultiWriter(stderrv, twriter{t: t, st: st}))
- if !onlyServer {
- cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
- if err != nil {
- t.Fatal(err)
- }
- st.cc = cc
- st.fr = NewFramer(cc, cc)
- }
-
- return st
-}
-
-func (st *serverTester) closeConn() {
- st.scMu.Lock()
- defer st.scMu.Unlock()
- st.sc.conn.Close()
-}
-
-func (st *serverTester) addLogFilter(phrase string) {
- st.logFilter = append(st.logFilter, phrase)
-}
-
-func (st *serverTester) stream(id uint32) *stream {
- ch := make(chan *stream, 1)
- st.sc.testHookCh <- func() {
- ch <- st.sc.streams[id]
- }
- return <-ch
-}
-
-func (st *serverTester) streamState(id uint32) streamState {
- ch := make(chan streamState, 1)
- st.sc.testHookCh <- func() {
- state, _ := st.sc.state(id)
- ch <- state
- }
- return <-ch
-}
-
-func (st *serverTester) Close() {
- st.ts.Close()
- if st.cc != nil {
- st.cc.Close()
- }
- log.SetOutput(os.Stderr)
-}
-
-// greet initiates the client's HTTP/2 connection into a state where
-// frames may be sent.
-func (st *serverTester) greet() {
- st.writePreface()
- st.writeInitialSettings()
- st.wantSettings()
- st.writeSettingsAck()
- st.wantSettingsAck()
-}
-
-func (st *serverTester) writePreface() {
- n, err := st.cc.Write(clientPreface)
- if err != nil {
- st.t.Fatalf("Error writing client preface: %v", err)
- }
- if n != len(clientPreface) {
- st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
- }
-}
-
-func (st *serverTester) writeInitialSettings() {
- if err := st.fr.WriteSettings(); err != nil {
- st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
- }
-}
-
-func (st *serverTester) writeSettingsAck() {
- if err := st.fr.WriteSettingsAck(); err != nil {
- st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
- }
-}
-
-func (st *serverTester) writeHeaders(p HeadersFrameParam) {
- if err := st.fr.WriteHeaders(p); err != nil {
- st.t.Fatalf("Error writing HEADERS: %v", err)
- }
-}
-
-func (st *serverTester) encodeHeaderField(k, v string) {
- err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
- if err != nil {
- st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
- }
-}
-
-// encodeHeader encodes headers and returns their HPACK bytes. headers
-// must contain an even number of key/value pairs. There may be
-// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
-// :scheme headers default to GET, / and https.
-func (st *serverTester) encodeHeader(headers ...string) []byte {
- if len(headers)%2 == 1 {
- panic("odd number of kv args")
- }
-
- st.headerBuf.Reset()
-
- if len(headers) == 0 {
- // Fast path, mostly for benchmarks, so test code doesn't pollute
- // profiles when we're looking to improve server allocations.
- st.encodeHeaderField(":method", "GET")
- st.encodeHeaderField(":path", "/")
- st.encodeHeaderField(":scheme", "https")
- return st.headerBuf.Bytes()
- }
-
- if len(headers) == 2 && headers[0] == ":method" {
- // Another fast path for benchmarks.
- st.encodeHeaderField(":method", headers[1])
- st.encodeHeaderField(":path", "/")
- st.encodeHeaderField(":scheme", "https")
- return st.headerBuf.Bytes()
- }
-
- pseudoCount := map[string]int{}
- keys := []string{":method", ":path", ":scheme"}
- vals := map[string][]string{
- ":method": {"GET"},
- ":path": {"/"},
- ":scheme": {"https"},
- }
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- headers = headers[2:]
- if _, ok := vals[k]; !ok {
- keys = append(keys, k)
- }
- if strings.HasPrefix(k, ":") {
- pseudoCount[k]++
- if pseudoCount[k] == 1 {
- vals[k] = []string{v}
- } else {
- // Allows testing of invalid headers w/ dup pseudo fields.
- vals[k] = append(vals[k], v)
- }
- } else {
- vals[k] = append(vals[k], v)
- }
- }
- st.headerBuf.Reset()
- for _, k := range keys {
- for _, v := range vals[k] {
- st.encodeHeaderField(k, v)
- }
- }
- return st.headerBuf.Bytes()
-}
-
-// bodylessReq1 writes a HEADERS frame with StreamID 1 and EndStream and EndHeaders set.
-func (st *serverTester) bodylessReq1(headers ...string) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(headers...),
- EndStream: true,
- EndHeaders: true,
- })
-}
-
-func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
- if err := st.fr.WriteData(streamID, endStream, data); err != nil {
- st.t.Fatalf("Error writing DATA: %v", err)
- }
-}
-
-func (st *serverTester) readFrame() (Frame, error) {
- go func() {
- fr, err := st.fr.ReadFrame()
- if err != nil {
- st.frErrc <- err
- } else {
- st.frc <- fr
- }
- }()
- t := st.readTimer
- if t == nil {
- t = time.NewTimer(2 * time.Second)
- st.readTimer = t
- }
- t.Reset(2 * time.Second)
- defer t.Stop()
- select {
- case f := <-st.frc:
- return f, nil
- case err := <-st.frErrc:
- return nil, err
- case <-t.C:
- return nil, errors.New("timeout waiting for frame")
- }
-}
-
-func (st *serverTester) wantHeaders() *HeadersFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *HeadersFrame", f)
- }
- return hf
-}
-
-func (st *serverTester) wantContinuation() *ContinuationFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
- }
- cf, ok := f.(*ContinuationFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *ContinuationFrame", f)
- }
- return cf
-}
-
-func (st *serverTester) wantData() *DataFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a DATA frame: %v", err)
- }
- df, ok := f.(*DataFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *DataFrame", f)
- }
- return df
-}
-
-func (st *serverTester) wantSettings() *SettingsFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *SettingsFrame", f)
- }
- return sf
-}
-
-func (st *serverTester) wantPing() *PingFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a PING frame: %v", err)
- }
- pf, ok := f.(*PingFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *PingFrame", f)
- }
- return pf
-}
-
-func (st *serverTester) wantGoAway() *GoAwayFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
- }
- gf, ok := f.(*GoAwayFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *GoAwayFrame", f)
- }
- return gf
-}
-
-func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
- }
- rs, ok := f.(*RSTStreamFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
- }
- if rs.FrameHeader.StreamID != streamID {
- st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
- }
- if rs.ErrCode != errCode {
- st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
- }
-}
-
-func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
- }
- wu, ok := f.(*WindowUpdateFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
- }
- if wu.FrameHeader.StreamID != streamID {
- st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
- }
- if wu.Increment != incr {
- st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
- }
-}
-
-func (st *serverTester) wantSettingsAck() {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("Wanting a settings ACK, received a %T", f)
- }
- if !sf.Header().Flags.Has(FlagSettingsAck) {
- st.t.Fatal("Settings Frame didn't have ACK set")
- }
-
-}
-
-func TestServer(t *testing.T) {
- gotReq := make(chan bool, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Foo", "Bar")
- gotReq <- true
- })
- defer st.Close()
-
- covers("3.5", `
- The server connection preface consists of a potentially empty
- SETTINGS frame ([SETTINGS]) that MUST be the first frame the
- server sends in the HTTP/2 connection.
- `)
-
- st.writePreface()
- st.writeInitialSettings()
- st.wantSettings()
- st.writeSettingsAck()
- st.wantSettingsAck()
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
-
- select {
- case <-gotReq:
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for request")
- }
-}
-
-func TestServer_Request_Get(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader("foo-bar", "some-value"),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Method != "GET" {
- t.Errorf("Method = %q; want GET", r.Method)
- }
- if r.URL.Path != "/" {
- t.Errorf("URL.Path = %q; want /", r.URL.Path)
- }
- if r.ContentLength != 0 {
- t.Errorf("ContentLength = %v; want 0", r.ContentLength)
- }
- if r.Close {
- t.Error("Close = true; want false")
- }
- if !strings.Contains(r.RemoteAddr, ":") {
- t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
- }
- if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
- t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
- }
- wantHeader := http.Header{
- "Foo-Bar": []string{"some-value"},
- }
- if !reflect.DeepEqual(r.Header, wantHeader) {
- t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
- }
- if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
- t.Errorf("Read = %d, %v; want 0, EOF", n, err)
- }
- })
-}
-
-func TestServer_Request_Get_PathSlashes(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":path", "/%2f/"),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.RequestURI != "/%2f/" {
- t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
- }
- if r.URL.Path != "///" {
- t.Errorf("URL.Path = %q; want ///", r.URL.Path)
- }
- })
-}
-
-// TODO: add a test with EndStream=true on the HEADERS but setting a
-// Content-Length anyway. Should we just omit it and force it to
-// zero?
-
-func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != 0 {
- t.Errorf("ContentLength = %v; want 0", r.ContentLength)
- }
- if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
- t.Errorf("Read = %d, %v; want 0, EOF", n, err)
- }
- })
-}
-
-func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
- testBodyContents(t, -1, "", func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, nil) // just kidding. empty body.
- })
-}
-
-func TestServer_Request_Post_Body_OneData(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, -1, content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte(content))
- })
-}
-
-func TestServer_Request_Post_Body_TwoData(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, -1, content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, false, []byte(content[:5]))
- st.writeData(1, true, []byte(content[5:]))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", strconv.Itoa(len(content)),
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte(content))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
- testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
- func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", "3",
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte("12"))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
- testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
- func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", "4",
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte("12345"))
- })
-}
-
-func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
- testServerRequest(t, write, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != wantContentLength {
- t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
- }
- all, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatal(err)
- }
- if string(all) != wantBody {
- t.Errorf("Read = %q; want %q", all, wantBody)
- }
- if err := r.Body.Close(); err != nil {
- t.Fatalf("Close: %v", err)
- }
- })
-}
-
-func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
- testServerRequest(t, write, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != wantContentLength {
- t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
- }
- all, err := ioutil.ReadAll(r.Body)
- if err == nil {
- t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
- wantReadError, all)
- }
- if !strings.Contains(err.Error(), wantReadError) {
- t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
- }
- if err := r.Body.Close(); err != nil {
- t.Fatalf("Close: %v", err)
- }
- })
-}
-
-// Using a Host header, instead of :authority
-func TestServer_Request_Get_Host(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader("host", host),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Host != host {
- t.Errorf("Host = %q; want %q", r.Host, host)
- }
- })
-}
-
-// Using an :authority pseudo-header, instead of Host
-func TestServer_Request_Get_Authority(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":authority", host),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Host != host {
- t.Errorf("Host = %q; want %q", r.Host, host)
- }
- })
-}
-
-func TestServer_Request_WithContinuation(t *testing.T) {
- wantHeader := http.Header{
- "Foo-One": []string{"value-one"},
- "Foo-Two": []string{"value-two"},
- "Foo-Three": []string{"value-three"},
- }
- testServerRequest(t, func(st *serverTester) {
- fullHeaders := st.encodeHeader(
- "foo-one", "value-one",
- "foo-two", "value-two",
- "foo-three", "value-three",
- )
- remain := fullHeaders
- chunks := 0
- for len(remain) > 0 {
- const maxChunkSize = 5
- chunk := remain
- if len(chunk) > maxChunkSize {
- chunk = chunk[:maxChunkSize]
- }
- remain = remain[len(chunk):]
-
- if chunks == 0 {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: chunk,
- EndStream: true, // no DATA frames
- EndHeaders: false, // we'll have continuation frames
- })
- } else {
- err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
- if err != nil {
- t.Fatal(err)
- }
- }
- chunks++
- }
- if chunks < 2 {
- t.Fatal("too few chunks")
- }
- }, func(r *http.Request) {
- if !reflect.DeepEqual(r.Header, wantHeader) {
- t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
- }
- })
-}
-
-// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
-func TestServer_Request_CookieConcat(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.bodylessReq1(
- ":authority", host,
- "cookie", "a=b",
- "cookie", "c=d",
- "cookie", "e=f",
- )
- }, func(r *http.Request) {
- const want = "a=b; c=d; e=f"
- if got := r.Header.Get("Cookie"); got != want {
- t.Errorf("Cookie = %q; want %q", got, want)
- }
- })
-}
-
-func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All HTTP/2 requests MUST include exactly one valid value" ...
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter("duplicate pseudo-header")
- st.bodylessReq1(":method", "GET", ":method", "POST")
- })
-}
-
-func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All pseudo-header fields MUST appear in the header block
- // before regular header fields. Any request or response that
- // contains a pseudo-header field that appears in a header
- // block after a regular header field MUST be treated as
- // malformed (Section 8.1.2.6)."
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter("pseudo-header after regular header")
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
- enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
- enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
- enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: buf.Bytes(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
-}
-
-func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
- st.bodylessReq1(":unknown_thing", "")
- })
-}
-
-func testRejectRequest(t *testing.T, send func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- t.Fatal("server request made it to handler; should've been rejected")
- })
- defer st.Close()
-
- st.greet()
- send(st)
- st.wantRSTStream(1, ErrCodeProtocol)
-}
-
-func TestServer_Ping(t *testing.T) {
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
-
- // Server should ignore this one, since it has ACK set.
- ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
- if err := st.fr.WritePing(true, ackPingData); err != nil {
- t.Fatal(err)
- }
-
- // But the server should reply to this one, since ACK is false.
- pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- if err := st.fr.WritePing(false, pingData); err != nil {
- t.Fatal(err)
- }
-
- pf := st.wantPing()
- if !pf.Flags.Has(FlagPingAck) {
- t.Error("response ping doesn't have ACK set")
- }
- if pf.Data != pingData {
- t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
- }
-}
-
-func TestServer_RejectsLargeFrames(t *testing.T) {
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
-
- // Write too large of a frame (too large by one byte)
- // We ignore the return value because it's expected that the server
-	// will only read the first 9 bytes (the header) and then disconnect.
- st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
-
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFrameSize {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
- }
-}
-
-func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
- puppet := newHandlerPuppet()
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- puppet.act(w, r)
- })
- defer st.Close()
- defer puppet.done()
-
- st.greet()
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // data coming
- EndHeaders: true,
- })
- st.writeData(1, false, []byte("abcdef"))
- puppet.do(readBodyHandler(t, "abc"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(1, 3)
-
- puppet.do(readBodyHandler(t, "def"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(1, 3)
-
- st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
- puppet.do(readBodyHandler(t, "ghi"))
- puppet.do(readBodyHandler(t, "jkl"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
-}
-
-func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
- if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
- t.Fatal(err)
- }
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFlowControl {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
- }
- if gf.LastStreamID != 0 {
- t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
- }
-}
-
-func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
- inHandler := make(chan bool)
- blockHandler := make(chan bool)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- <-blockHandler
- })
- defer st.Close()
- defer close(blockHandler)
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- // Send a bogus window update:
- if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
- t.Fatal(err)
- }
- st.wantRSTStream(1, ErrCodeFlowControl)
-}
-
-// testServerPostUnblock sends a hanging POST with unsent data to handler,
-// then runs fn once in the handler, and verifies that the error returned from
-// handler is acceptable. It fails if it takes over 5 seconds for the handler to exit.
-func testServerPostUnblock(t *testing.T,
- handler func(http.ResponseWriter, *http.Request) error,
- fn func(*serverTester),
- checkErr func(error),
- otherHeaders ...string) {
- inHandler := make(chan bool)
- errc := make(chan error, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- errc <- handler(w, r)
- })
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- fn(st)
- select {
- case err := <-errc:
- if checkErr != nil {
- checkErr(err)
- }
- case <-time.After(5 * time.Second):
- t.Fatal("timeout waiting for Handler to return")
- }
- st.Close()
-}
-
-func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
- testServerPostUnblock(t,
- func(w http.ResponseWriter, r *http.Request) (err error) {
- _, err = r.Body.Read(make([]byte, 1))
- return
- },
- func(st *serverTester) {
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- },
- func(err error) {
- if err == nil {
- t.Error("unexpected nil error from Request.Body.Read")
- }
- },
- )
-}
-
-func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
- testServerPostUnblock(t,
- func(w http.ResponseWriter, r *http.Request) (err error) {
- _, err = r.Body.Read(make([]byte, 1))
- return
- },
- func(st *serverTester) { st.cc.Close() },
- func(err error) {
- if err == nil {
- t.Error("unexpected nil error from Request.Body.Read")
- }
- },
- )
-}
-
-var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
- <-w.(http.CloseNotifier).CloseNotify()
- return nil
-}
-
-func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- }, nil)
-}
-
-func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
-}
-
-// Tests that CloseNotify unblocks after a stream error caused by a problem on
-// the client's side that is unrelated to the client explicitly canceling the
-// stream (which TestServer_CloseNotify_After_RSTStream above covers).
-func TestServer_CloseNotify_After_StreamError(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
- // data longer than declared Content-Length => stream error
- st.writeData(1, true, []byte("1234"))
- }, nil, "content-length", "3")
-}
-
-func TestServer_StateTransitions(t *testing.T) {
- var st *serverTester
- inHandler := make(chan bool)
- writeData := make(chan bool)
- leaveHandler := make(chan bool)
- st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- if st.stream(1) == nil {
- t.Errorf("nil stream 1 in handler")
- }
- if got, want := st.streamState(1), stateOpen; got != want {
- t.Errorf("in handler, state is %v; want %v", got, want)
- }
- writeData <- true
- if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
- t.Errorf("body read = %d, %v; want 0, EOF", n, err)
- }
- if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
- t.Errorf("in handler, state is %v; want %v", got, want)
- }
-
- <-leaveHandler
- })
- st.greet()
- if st.stream(1) != nil {
- t.Fatal("stream 1 should be empty")
- }
- if got := st.streamState(1); got != stateIdle {
- t.Fatalf("stream 1 should be idle; got %v", got)
- }
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- <-writeData
- st.writeData(1, true, nil)
-
- leaveHandler <- true
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("expected END_STREAM flag")
- }
-
- if got, want := st.streamState(1), stateClosed; got != want {
- t.Errorf("at end, state is %v; want %v", got, want)
- }
- if st.stream(1) != nil {
- t.Fatal("at end, stream 1 should be gone")
- }
-}
-
-// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
-func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- st.writeHeaders(HeadersFrameParam{ // Not a continuation.
- StreamID: 3, // different stream.
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-// test HEADERS w/o EndHeaders + PING (should get rejected)
-func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WritePing(false, [8]byte{}); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
-func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- st.wantHeaders()
- if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
-func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// No HEADERS on stream 0.
-func TestServer_Rejects_Headers0(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- st.writeHeaders(HeadersFrameParam{
- StreamID: 0,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-// No CONTINUATION on stream 0.
-func TestServer_Rejects_Continuation0(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-func TestServer_Rejects_PushPromise(t *testing.T) {
- testServerRejects(t, func(st *serverTester) {
- pp := PushPromiseParam{
- StreamID: 1,
- PromiseID: 3,
- }
- if err := st.fr.WritePushPromise(pp); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// testServerRejects tests that the server hangs up with a GOAWAY
-// frame and a server close after the client does something
-// deserving a CONNECTION_ERROR.
-func testServerRejects(t *testing.T, writeReq func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
- st.addLogFilter("connection error: PROTOCOL_ERROR")
- defer st.Close()
- st.greet()
- writeReq(st)
-
- st.wantGoAway()
- errc := make(chan error, 1)
- go func() {
- fr, err := st.fr.ReadFrame()
- if err == nil {
- err = fmt.Errorf("got frame of type %T", fr)
- }
- errc <- err
- }()
- select {
- case err := <-errc:
- if err != io.EOF {
- t.Errorf("ReadFrame = %v; want io.EOF", err)
- }
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for disconnect")
- }
-}
-
-// testServerRequest sets up an idle HTTP/2 connection and lets you
-// write a single request with writeReq, and then verify that the
-// *http.Request is built correctly in checkReq.
-func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
- gotReq := make(chan bool, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- if r.Body == nil {
- t.Fatal("nil Body")
- }
- checkReq(r)
- gotReq <- true
- })
- defer st.Close()
-
- st.greet()
- writeReq(st)
-
- select {
- case <-gotReq:
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for request")
- }
-}
-
-func getSlash(st *serverTester) { st.bodylessReq1() }
-
-func TestServer_Response_NoData(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- // Nothing.
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- })
-}
-
-func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Foo-Bar", "some-value")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo-bar", "some-value"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
- const msg = "this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Content-Type", "foo/bar")
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "foo/bar"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- })
-}
-
-func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
- const msg = "hi"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-// Header accessed only after the initial write.
-func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
- const msg = "this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- w.Header().Set("foo", "should be ignored")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-// Header accessed before the initial write and later mutated.
-func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
- const msg = "this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("foo", "proper value")
- io.WriteString(w, msg)
- w.Header().Set("foo", "should be ignored")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo", "proper value"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-func TestServer_Response_Data_SniffLenType(t *testing.T) {
- const msg = "this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- })
-}
-
-func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
- const msg = "this is HTML"
- const msg2 = ", and this is the next chunk"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- w.(http.Flusher).Flush()
- io.WriteString(w, msg2)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- {
- df := st.wantData()
- if df.StreamEnded() {
- t.Error("unexpected END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- }
- {
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("wanted END_STREAM flag on last data chunk")
- }
- if got := string(df.Data()); got != msg2 {
- t.Errorf("got DATA %q; want %q", got, msg2)
- }
- }
- })
-}
-
-func TestServer_Response_LargeWrite(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- n, err := w.Write(bytes.Repeat([]byte("a"), size))
- if err != nil {
- return fmt.Errorf("Write error: %v", err)
- }
- if n != size {
- return fmt.Errorf("wrong size %d from Write", n)
- }
- return nil
- }, func(st *serverTester) {
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, 0},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- // Give the handler quota to write:
- if err := st.fr.WriteWindowUpdate(1, size); err != nil {
- t.Fatal(err)
- }
- // Give the handler quota to write to connection-level
- // window as well
- if err := st.fr.WriteWindowUpdate(0, size); err != nil {
- t.Fatal(err)
- }
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- var bytes, frames int
- for {
- df := st.wantData()
- bytes += len(df.Data())
- frames++
- for _, b := range df.Data() {
- if b != 'a' {
- t.Fatal("non-'a' byte seen in DATA")
- }
- }
- if df.StreamEnded() {
- break
- }
- }
- if bytes != size {
- t.Errorf("Got %d bytes; want %d", bytes, size)
- }
- if want := int(size / maxFrameSize); frames < want || frames > want*2 {
-			t.Errorf("Got %d frames; want %d", frames, want)
- }
- })
-}
-
-// Test that the handler can't write more than the client allows
-func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- n, err := w.Write(bytes.Repeat([]byte("a"), size))
- if err != nil {
- return fmt.Errorf("Write error: %v", err)
- }
- if n != size {
- return fmt.Errorf("wrong size %d from Write", n)
- }
- return nil
- }, func(st *serverTester) {
- // Set the window size to something explicit for this test.
- // It's also how much initial data we expect.
- const initWindowSize = 123
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, initWindowSize},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
- defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }()
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- df := st.wantData()
- if got := len(df.Data()); got != initWindowSize {
- t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got)
- }
-
- for _, quota := range []int{1, 13, 127} {
- if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
- t.Fatal(err)
- }
- df := st.wantData()
- if int(quota) != len(df.Data()) {
- t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
- }
- }
-
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
-func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- errc := make(chan error, 1)
- go func() {
- _, err := w.Write(bytes.Repeat([]byte("a"), size))
- errc <- err
- }()
- select {
- case err := <-errc:
- if err == nil {
- return errors.New("unexpected nil error from Write in handler")
- }
- return nil
- case <-time.After(2 * time.Second):
- return errors.New("timeout waiting for Write in handler")
- }
- }, func(st *serverTester) {
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, 0},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- // Nothing; send empty DATA
- return nil
- }, func(st *serverTester) {
- // Handler gets no data quota:
- if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- df := st.wantData()
- if got := len(df.Data()); got != 0 {
- t.Fatalf("unexpected %d DATA bytes; want 0", got)
- }
- if !df.StreamEnded() {
- t.Fatal("DATA didn't have END_STREAM")
- }
- })
-}
-
-func TestServer_Response_Automatic100Continue(t *testing.T) {
- const msg = "foo"
- const reply = "bar"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- if v := r.Header.Get("Expect"); v != "" {
- t.Errorf("Expect header = %q; want empty", v)
- }
- buf := make([]byte, len(msg))
- // This read should trigger the 100-continue being sent.
- if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
- return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
- }
- _, err := io.WriteString(w, reply)
- return err
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
- EndStream: false,
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := decodeHeader(t, hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "100"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Fatalf("Got headers %v; want %v", goth, wanth)
- }
-
- // Okay, they sent status 100, so we can send our
- // gigantic and/or sensitive "foo" payload now.
- st.writeData(1, true, []byte(msg))
-
- st.wantWindowUpdate(0, uint32(len(msg)))
-
- hf = st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("expected data to follow")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth = decodeHeader(t, hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(reply))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
-
- df := st.wantData()
- if string(df.Data()) != reply {
- t.Errorf("Client read %q; want %q", df.Data(), reply)
- }
- if !df.StreamEnded() {
- t.Errorf("expect data stream end")
- }
- })
-}
-
-func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
- errc := make(chan error, 1)
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- p := []byte("some data.\n")
- for {
- _, err := w.Write(p)
- if err != nil {
- errc <- err
- return nil
- }
- }
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: false,
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- // Close the connection and wait for the handler to (hopefully) notice.
- st.cc.Close()
- select {
- case <-errc:
- case <-time.After(5 * time.Second):
- t.Error("timeout")
- }
- })
-}
-
-func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
- const testPath = "/some/path"
-
- inHandler := make(chan uint32)
- leaveHandler := make(chan bool)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- id := w.(*responseWriter).rws.stream.id
- inHandler <- id
- if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
- t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
- }
- <-leaveHandler
- })
- defer st.Close()
- st.greet()
- nextStreamID := uint32(1)
- streamID := func() uint32 {
- defer func() { nextStreamID += 2 }()
- return nextStreamID
- }
- sendReq := func(id uint32, headers ...string) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(headers...),
- EndStream: true,
- EndHeaders: true,
- })
- }
- for i := 0; i < defaultMaxStreams; i++ {
- sendReq(streamID())
- <-inHandler
- }
- defer func() {
- for i := 0; i < defaultMaxStreams; i++ {
- leaveHandler <- true
- }
- }()
-
- // And this one should cross the limit:
- // (It's also sent as a CONTINUATION, to verify we still track the decoder context,
- // even if we're rejecting it)
- rejectID := streamID()
- headerBlock := st.encodeHeader(":path", testPath)
- frag1, frag2 := headerBlock[:3], headerBlock[3:]
- st.writeHeaders(HeadersFrameParam{
- StreamID: rejectID,
- BlockFragment: frag1,
- EndStream: true,
- EndHeaders: false, // CONTINUATION coming
- })
- if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
- t.Fatal(err)
- }
- st.wantRSTStream(rejectID, ErrCodeProtocol)
-
- // But let a handler finish:
- leaveHandler <- true
- st.wantHeaders()
-
- // And now another stream should be able to start:
- goodID := streamID()
- sendReq(goodID, ":path", testPath)
- select {
- case got := <-inHandler:
- if got != goodID {
- t.Errorf("Got stream %d; want %d", got, goodID)
- }
- case <-time.After(3 * time.Second):
- t.Error("timeout waiting for handler")
- }
-}
-
-// So many response headers that the server needs to use CONTINUATION frames:
-func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- h := w.Header()
- for i := 0; i < 5000; i++ {
- h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
- }
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.HeadersEnded() {
- t.Fatal("got unwanted END_HEADERS flag")
- }
- n := 0
- for {
- n++
- cf := st.wantContinuation()
- if cf.HeadersEnded() {
- break
- }
- }
- if n < 5 {
- t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
- }
- })
-}
-
-// This previously crashed (reported by Mathieu Lonjaret as observed
-// while using Camlistore) because we got a DATA frame from the client
-// after the handler exited and our logic at the time was wrong,
-// keeping a stream in the map in stateClosed, which tickled an
-// invariant check later when we tried to remove that stream (via
-// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
-// ended.
-func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- // nothing
- return nil
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: false, // DATA is coming
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if !hf.HeadersEnded() || !hf.StreamEnded() {
- t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
- }
-
-		// Sent when a Handler closes while a client has
- // indicated it's still sending DATA:
- st.wantRSTStream(1, ErrCodeCancel)
-
- // Now the handler has ended, so it's ended its
- // stream, but the client hasn't closed its side
- // (stateClosedLocal). So send more data and verify
- // it doesn't crash with an internal invariant panic, like
- // it did before.
- st.writeData(1, true, []byte("foo"))
-
- // Sent after a peer sends data anyway (admittedly the
- // previous RST_STREAM might've still been in-flight),
- // but they'll get the more friendly 'cancel' code
- // first.
- st.wantRSTStream(1, ErrCodeStreamClosed)
-
- // Set up a bunch of machinery to record the panic we saw
- // previously.
- var (
- panMu sync.Mutex
- panicVal interface{}
- )
-
- testHookOnPanicMu.Lock()
- testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
- panMu.Lock()
- panicVal = pv
- panMu.Unlock()
- return true
- }
- testHookOnPanicMu.Unlock()
-
- // Now force the serve loop to end, via closing the connection.
- st.cc.Close()
- select {
- case <-st.sc.doneServing:
- // Loop has exited.
- panMu.Lock()
- got := panicVal
- panMu.Unlock()
- if got != nil {
- t.Errorf("Got panic: %v", got)
- }
- case <-time.After(5 * time.Second):
- t.Error("timeout")
- }
- })
-}
-
-func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
-func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
-
-func testRejectTLS(t *testing.T, max uint16) {
- st := newServerTester(t, nil, func(c *tls.Config) {
- c.MaxVersion = max
- })
- defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
-}
-
-func TestServer_Rejects_TLSBadCipher(t *testing.T) {
- st := newServerTester(t, nil, func(c *tls.Config) {
- // Only list bad ones:
- c.CipherSuites = []uint16{
- tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- }
- })
- defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
-}
-
-func TestServer_Advertises_Common_Cipher(t *testing.T) {
- const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- st := newServerTester(t, nil, func(c *tls.Config) {
- // Have the client only support the one required by the spec.
- c.CipherSuites = []uint16{requiredSuite}
- }, func(ts *httptest.Server) {
- var srv *http.Server = ts.Config
- // Have the server configured with one specific cipher suite
- // which is banned. This tests that ConfigureServer ends up
- // adding the good one to this list.
- srv.TLSConfig = &tls.Config{
- CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, // just a banned one
- }
- })
- defer st.Close()
- st.greet()
-}
-
-// TODO: move this onto *serverTester, and re-use the same hpack
-// decoding context throughout. We're just getting lucky here with
-// creating a new decoder each time.
-func decodeHeader(t *testing.T, headerBlock []byte) (pairs [][2]string) {
- d := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) {
- pairs = append(pairs, [2]string{f.Name, f.Value})
- })
- if _, err := d.Write(headerBlock); err != nil {
- t.Fatalf("hpack decoding error: %v", err)
- }
- if err := d.Close(); err != nil {
- t.Fatalf("hpack decoding error: %v", err)
- }
- return
-}
-
-// testServerResponse sets up an idle HTTP/2 connection and lets you
-// write a single request with writeReq, and then reply to it in some way with the provided handler,
-// and then verify the output with the serverTester again (assuming the handler returns nil)
-func testServerResponse(t testing.TB,
- handler func(http.ResponseWriter, *http.Request) error,
- client func(*serverTester),
-) {
- errc := make(chan error, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- if r.Body == nil {
- t.Fatal("nil Body")
- }
- errc <- handler(w, r)
- })
- defer st.Close()
-
- donec := make(chan bool)
- go func() {
- defer close(donec)
- st.greet()
- client(st)
- }()
-
- select {
- case <-donec:
- return
- case <-time.After(5 * time.Second):
- t.Fatal("timeout")
- }
-
- select {
- case err := <-errc:
- if err != nil {
- t.Fatalf("Error in handler: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for handler to finish")
- }
-}
-
-// readBodyHandler returns an http Handler func that reads len(want)
-// bytes from r.Body and fails t if the contents read were not
-// the value of want.
-func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
- return func(w http.ResponseWriter, r *http.Request) {
- buf := make([]byte, len(want))
- _, err := io.ReadFull(r.Body, buf)
- if err != nil {
- t.Error(err)
- return
- }
- if string(buf) != want {
- t.Errorf("read %q; want %q", buf, want)
- }
- }
-}
-
-// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
-// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
-// http://sourceforge.net/p/curl/bugs/1472/
-func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) }
-func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
-
-func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
- if runtime.GOOS != "linux" {
- t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
- }
- requireCurl(t)
- const msg = "Hello from curl!\n"
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Foo", "Bar")
- w.Header().Set("Client-Proto", r.Proto)
- io.WriteString(w, msg)
- }))
- ConfigureServer(ts.Config, &Server{
- PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
- })
- ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- ts.StartTLS()
- defer ts.Close()
-
- var gotConn int32
- testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
-
- t.Logf("Running test server for curl to hit at: %s", ts.URL)
- container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
- defer kill(container)
- resc := make(chan interface{}, 1)
- go func() {
- res, err := dockerLogs(container)
- if err != nil {
- resc <- err
- } else {
- resc <- res
- }
- }()
- select {
- case res := <-resc:
- if err, ok := res.(error); ok {
- t.Fatal(err)
- }
- if !strings.Contains(string(res.([]byte)), "foo: Bar") {
- t.Errorf("didn't see foo: Bar header")
- t.Logf("Got: %s", res)
- }
- if !strings.Contains(string(res.([]byte)), "client-proto: HTTP/2") {
- t.Errorf("didn't see client-proto: HTTP/2 header")
- t.Logf("Got: %s", res)
- }
- if !strings.Contains(string(res.([]byte)), msg) {
- t.Errorf("didn't see %q content", msg)
- t.Logf("Got: %s", res)
- }
- case <-time.After(3 * time.Second):
- t.Errorf("timeout waiting for curl")
- }
-
- if atomic.LoadInt32(&gotConn) == 0 {
- t.Error("never saw an http2 connection")
- }
-}
-
-func BenchmarkServerGets(b *testing.B) {
- b.ReportAllocs()
-
- const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- })
- defer st.Close()
- st.greet()
-
-	// Give the server quota to reply. (plus it has the 64KB)
- if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
- b.Fatal(err)
- }
-
- for i := 0; i < b.N; i++ {
- id := 1 + uint32(i)*2
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
- b.Fatalf("DATA didn't have END_STREAM; got %v", df)
- }
- }
-}
-
-func BenchmarkServerPosts(b *testing.B) {
- b.ReportAllocs()
-
- const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- })
- defer st.Close()
- st.greet()
-
-	// Give the server quota to reply. (plus it has the 64KB)
- if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
- b.Fatal(err)
- }
-
- for i := 0; i < b.N; i++ {
- id := 1 + uint32(i)*2
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false,
- EndHeaders: true,
- })
- st.writeData(id, true, nil)
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
- b.Fatalf("DATA didn't have END_STREAM; got %v", df)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml b/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml
deleted file mode 100644
index 31a84bed4f..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml
+++ /dev/null
@@ -1,5021 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol version 2
-
-
- Twist
-
- mbelshe@chromium.org
-
-
-
-
- Google, Inc
-
- fenix@google.com
-
-
-
-
- Mozilla
-
-
- 331 E Evelyn Street
- Mountain View
- CA
- 94041
- US
-
- martin.thomson@gmail.com
-
-
-
-
- Applications
- HTTPbis
- HTTP
- SPDY
- Web
-
-
-
- This specification describes an optimized expression of the semantics of the Hypertext
- Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a
- reduced perception of latency by introducing header field compression and allowing multiple
- concurrent messages on the same connection. It also introduces unsolicited push of
- representations from servers to clients.
-
-
- This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.
- HTTP's existing semantics remain unchanged.
-
-
-
-
-
- Discussion of this draft takes place on the HTTPBIS working group mailing list
- (ietf-http-wg@w3.org), which is archived at .
-
-
- Working Group information can be found at ; that specific to HTTP/2 are at .
-
-
- The changes in this draft are summarized in .
-
-
-
-
-
-
-
-
-
- The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the
- HTTP/1.1 message format () has
- several characteristics that have a negative overall effect on application performance
- today.
-
-
- In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given
- TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed
- request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1
- clients that need to make many requests typically use multiple connections to a server in
- order to achieve concurrency and thereby reduce latency.
-
-
- Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary
- network traffic, as well as causing the initial TCP congestion
- window to quickly fill. This can result in excessive latency when multiple requests are
- made on a new TCP connection.
-
-
- HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an
- underlying connection. Specifically, it allows interleaving of request and response
- messages on the same connection and uses an efficient coding for HTTP header fields. It
- also allows prioritization of requests, letting more important requests complete more
- quickly, further improving performance.
-
-
- The resulting protocol is more friendly to the network, because fewer TCP connections can
- be used in comparison to HTTP/1.x. This means less competition with other flows, and
- longer-lived connections, which in turn leads to better utilization of available network
- capacity.
-
-
- Finally, HTTP/2 also enables more efficient processing of messages through use of binary
- message framing.
-
-
-
-
-
- HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core
- features of HTTP/1.1, but aims to be more efficient in several ways.
-
-
- The basic protocol unit in HTTP/2 is a frame. Each frame
- type serves a different purpose. For example, HEADERS and
- DATA frames form the basis of HTTP requests and
- responses; other frame types like SETTINGS,
- WINDOW_UPDATE, and PUSH_PROMISE are used in support of other
- HTTP/2 features.
-
-
- Multiplexing of requests is achieved by having each HTTP request-response exchange
- associated with its own stream. Streams are largely
- independent of each other, so a blocked or stalled request or response does not prevent
- progress on other streams.
-
-
- Flow control and prioritization ensure that it is possible to efficiently use multiplexed
- streams. Flow control helps to ensure that only data that
- can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed
- to the most important streams first.
-
-
- HTTP/2 adds a new interaction mode, whereby a server can push
- responses to a client. Server push allows a server to speculatively send a client
- data that the server anticipates the client will need, trading off some network usage
- against a potential latency gain. The server does this by synthesizing a request, which it
- sends as a PUSH_PROMISE frame. The server is then able to send a response to
- the synthetic request on a separate stream.
-
-
- Frames that contain HTTP header fields are compressed.
- HTTP requests can be highly redundant, so compression can reduce the size of requests and
- responses significantly.
-
-
-
-
- The HTTP/2 specification is split into four parts:
-
-
- Starting HTTP/2 covers how an HTTP/2 connection is
- initiated.
-
-
- The framing and streams layers describe the way HTTP/2 frames are
- structured and formed into multiplexed streams.
-
-
- Frame and error
- definitions include details of the frame and error types used in HTTP/2.
-
-
- HTTP mappings and additional
- requirements describe how HTTP semantics are expressed using frames and
- streams.
-
-
-
-
- While some of the frame and stream layer concepts are isolated from HTTP, this
- specification does not define a completely generic framing layer. The framing and streams
- layers are tailored to the needs of the HTTP protocol and server push.
-
-
-
-
-
- The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
- NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
- described in RFC 2119.
-
-
- All numeric values are in network byte order. Values are unsigned unless otherwise
- indicated. Literal values are provided in decimal or hexadecimal as appropriate.
- Hexadecimal literals are prefixed with 0x to distinguish them
- from decimal literals.
-
-
- The following terms are used:
-
-
- The endpoint initiating the HTTP/2 connection.
-
-
- A transport-layer connection between two endpoints.
-
-
- An error that affects the entire HTTP/2 connection.
-
-
- Either the client or server of the connection.
-
-
- The smallest unit of communication within an HTTP/2 connection, consisting of a header
- and a variable-length sequence of octets structured according to the frame type.
-
-
- An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint
- that is remote to the primary subject of discussion.
-
-
- An endpoint that is receiving frames.
-
-
- An endpoint that is transmitting frames.
-
-
- The endpoint which did not initiate the HTTP/2 connection.
-
-
- A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.
-
-
- An error on the individual HTTP/2 stream.
-
-
-
-
- Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
- in .
-
-
-
-
-
-
- An HTTP/2 connection is an application layer protocol running on top of a TCP connection
- (). The client is the TCP connection initiator.
-
-
- HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same
- default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result,
- implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the
- upstream server (the immediate peer to which the client wishes to establish a connection)
- supports HTTP/2.
-
-
-
- The means by which support for HTTP/2 is determined is different for "http" and "https"
- URIs. Discovery for "http" URIs is described in . Discovery
- for "https" URIs is described in .
-
-
-
-
- The protocol defined in this document has two identifiers.
-
-
-
- The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN)
- field and any place that HTTP/2 over TLS is identified.
-
-
- The "h2" string is serialized into an ALPN protocol identifier as the two octet
- sequence: 0x68, 0x32.
-
-
-
-
- The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP.
- This identifier is used in the HTTP/1.1 Upgrade header field and any place that
- HTTP/2 over TCP is identified.
-
-
-
-
-
- Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message
- semantics described in this document.
-
-
- RFC Editor's Note: please remove the remainder of this section prior to the
- publication of a final version of this document.
-
-
- Only implementations of the final, published RFC can identify themselves as "h2" or "h2c".
- Until such an RFC exists, implementations MUST NOT identify themselves using these
- strings.
-
-
- Examples and text throughout the rest of this document use "h2" as a matter of
- editorial convenience only. Implementations of draft versions MUST NOT identify using
- this string.
-
-
- Implementations of draft versions of the protocol MUST add the string "-" and the
- corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11
- over TLS is identified using the string "h2-11".
-
-
- Non-compatible experiments that are based on these draft versions MUST append the string
- "-" and an experiment name to the identifier. For example, an experimental implementation
- of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself
- as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in
- . Experimenters are
- encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.
-
-
-
-
-
- A client that makes a request for an "http" URI without prior knowledge about support for
- HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade
- header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include
- exactly one HTTP2-Settings header field.
-
-
- For example:
-  [example: an HTTP/1.1 GET request carrying "Upgrade: h2c",
-   "Connection: Upgrade, HTTP2-Settings", and a single HTTP2-Settings header field]
-]]>
-
-
- Requests that contain an entity body MUST be sent in their entirety before the client can
- send HTTP/2 frames. This means that a large request entity can block the use of the
- connection until it is completely sent.
-
-
- If concurrency of an initial request with subsequent requests is important, an OPTIONS
- request can be used to perform the upgrade to HTTP/2, at the cost of an additional
- round-trip.
-
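As a rough illustration of the upgrade request described above, here is a minimal Go sketch that decorates a plain HTTP/1.1 GET with the "h2c" Upgrade token and a single HTTP2-Settings header. The helper name and the settings value are assumptions for illustration, not part of this package.

    package main

    import (
        "fmt"
        "net/http"
    )

    // upgradeRequest is a hypothetical helper. http2Settings is expected to be
    // a base64url-encoded SETTINGS payload.
    func upgradeRequest(url, http2Settings string) (*http.Request, error) {
        req, err := http.NewRequest("GET", url, nil)
        if err != nil {
            return nil, err
        }
        req.Header.Set("Upgrade", "h2c")
        // Upgrade and HTTP2-Settings are connection-specific, so both are
        // listed in the Connection header to keep them hop-by-hop.
        req.Header.Set("Connection", "Upgrade, HTTP2-Settings")
        req.Header.Set("HTTP2-Settings", http2Settings)
        return req, nil
    }

    func main() {
        // "AAMAAABk" is the base64url encoding of a 6-octet SETTINGS payload
        // setting one parameter to 100 (an illustrative value).
        req, _ := upgradeRequest("http://example.org/", "AAMAAABk")
        fmt.Println(req.Header.Get("Upgrade"), req.Header.Get("Connection"))
    }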
-
- A server that does not support HTTP/2 can respond to the request as though the Upgrade
- header field were absent:
-
-
-
-HTTP/1.1 200 OK
-Content-Length: 243
-Content-Type: text/html
-
-...
-
-
-
- A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with
- "h2" implies HTTP/2 over TLS, which is instead negotiated as described in .
-
-
- A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)
- response. After the empty line that terminates the 101 response, the server can begin
- sending HTTP/2 frames. These frames MUST include a response to the request that initiated
- the Upgrade.
-
-
-
-
- For example:
-
-
-HTTP/1.1 101 Switching Protocols
-Connection: Upgrade
-Upgrade: h2c
-
-[ HTTP/2 connection ...
-
-
-
- The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a
- SETTINGS frame.
-
-
- The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is
- assigned default priority values. Stream 1 is
- implicitly half closed from the client toward the server, since the request is completed
- as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the
- response.
-
-
-
-
- A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field
- that includes parameters that govern the HTTP/2 connection, provided in anticipation of
- the server accepting the request to upgrade.
-
-
-
-
-
- A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,
- or if more than one is present. A server MUST NOT send this header field.
-
-
-
- The content of the HTTP2-Settings header field is the
- payload of a SETTINGS frame (), encoded as a
- base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The
- ABNF production for token68 is
- defined in .
-
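As a hedged sketch of that encoding, the Go snippet below serializes a SETTINGS payload (16-bit identifier, 32-bit value pairs) and encodes it as base64url with the trailing '=' padding omitted. The settingsPayload helper and the identifier used are assumptions for illustration only.

    package main

    import (
        "encoding/base64"
        "encoding/binary"
        "encoding/hex"
        "fmt"
    )

    // settingsPayload builds a SETTINGS frame payload: one 6-octet entry per
    // setting, identifier followed by value, both in network byte order.
    func settingsPayload(settings map[uint16]uint32) []byte {
        buf := make([]byte, 0, 6*len(settings))
        for id, val := range settings {
            var entry [6]byte
            binary.BigEndian.PutUint16(entry[0:2], id)
            binary.BigEndian.PutUint32(entry[2:6], val)
            buf = append(buf, entry[:]...)
        }
        return buf
    }

    func main() {
        // 0x3 is used here as an example setting identifier with value 100.
        payload := settingsPayload(map[uint16]uint32{0x3: 100})
        fmt.Println(hex.EncodeToString(payload))
        // RawURLEncoding is the URL- and filename-safe alphabet without padding.
        fmt.Println(base64.RawURLEncoding.EncodeToString(payload)) // AAMAAABk
    }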
-
- Since the upgrade is only intended to apply to the immediate connection, a client
- sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded
- downstream.
-
-
- A server decodes and interprets these values as it would any other
- SETTINGS frame. Acknowledgement of the
- SETTINGS parameters is not necessary, since a 101 response serves as implicit
- acknowledgment. Providing these values in the Upgrade request gives a client an
- opportunity to provide parameters prior to receiving any frames from the server.
-
-
-
-
-
-
- A client that makes a request to an "https" URI uses TLS
- with the application layer protocol negotiation extension.
-
-
- HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a
- client or selected by a server.
-
-
- Once TLS negotiation is complete, both the client and the server send a connection preface.
-
-
-
-
-
- A client can learn that a particular server supports HTTP/2 by other means. For example,
- describes a mechanism for advertising this capability.
-
-
- A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,
- after the connection preface; a server can
- identify such a connection by the presence of the connection preface. This only affects
- the establishment of HTTP/2 connections over cleartext TCP; implementations that support
- HTTP/2 over TLS MUST use protocol negotiation in TLS.
-
-
- Without additional information, prior support for HTTP/2 is not a strong signal that a
- given server will support HTTP/2 for future connections. For example, it is possible for
- server configurations to change, for configurations to differ between instances in
- clustered servers, or for network conditions to change.
-
-
-
-
-
- Upon establishment of a TCP connection and determination that HTTP/2 will be used by both
- peers, each endpoint MUST send a connection preface as a final confirmation and to
- establish the initial SETTINGS parameters for the HTTP/2 connection. The client and
- server each send a different connection preface.
-
-
- The client connection preface starts with a sequence of 24 octets, which in hex notation
- are:
-
-
-0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
- (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence
- is followed by a SETTINGS frame (). The
- SETTINGS frame MAY be empty. The client sends the client connection
- preface immediately upon receipt of a 101 Switching Protocols response (indicating a
- successful upgrade), or as the first application data octets of a TLS connection. If
- starting an HTTP/2 connection with prior knowledge of server support for the protocol, the
- client connection preface is sent upon connection establishment.
-
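A minimal Go sketch of that 24-octet client connection preface, assuming a server that reads the first 24 octets off the wire before parsing any frames; the names are illustrative and not this package's API.

    package main

    import (
        "bytes"
        "fmt"
    )

    // clientPreface is the fixed 24-octet sequence sent by clients.
    const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

    // validPreface reports whether the first 24 octets read from a connection
    // match the client connection preface.
    func validPreface(first24 []byte) bool {
        return bytes.Equal(first24, []byte(clientPreface))
    }

    func main() {
        fmt.Println(len(clientPreface), validPreface([]byte(clientPreface))) // 24 true
    }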
-
-
-
- The client connection preface is selected so that a large proportion of HTTP/1.1 or
- HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note
- that this does not address the concerns raised in .
-
-
-
-
- The server connection preface consists of a potentially empty SETTINGS
- frame () that MUST be the first frame the server sends in the
- HTTP/2 connection.
-
-
- The SETTINGS frames received from a peer as part of the connection preface
- MUST be acknowledged (see ) after sending the connection
- preface.
-
-
- To avoid unnecessary latency, clients are permitted to send additional frames to the
- server immediately after sending the client connection preface, without waiting to receive
- the server connection preface. It is important to note, however, that the server
- connection preface SETTINGS frame might include parameters that necessarily
- alter how a client is expected to communicate with the server. Upon receiving the
- SETTINGS frame, the client is expected to honor any parameters established.
- In some configurations, it is possible for the server to transmit SETTINGS
- before the client sends additional frames, providing an opportunity to avoid this issue.
-
-
- Clients and servers MUST treat an invalid connection preface as a connection error of type
- PROTOCOL_ERROR. A GOAWAY frame ()
- MAY be omitted in this case, since an invalid preface indicates that the peer is not using
- HTTP/2.
-
-
-
-
-
-
- Once the HTTP/2 connection is established, endpoints can begin exchanging frames.
-
-
-
-
- All frames begin with a fixed 9-octet header followed by a variable-length payload.
-
-
-
-
-
- The fields of the frame header are defined as:
-
-
-
- The length of the frame payload expressed as an unsigned 24-bit integer. Values
- greater than 2^14 (16,384) MUST NOT be sent unless the receiver has
- set a larger value for SETTINGS_MAX_FRAME_SIZE.
-
-
- The 9 octets of the frame header are not included in this value.
-
-
-
-
- The 8-bit type of the frame. The frame type determines the format and semantics of
- the frame. Implementations MUST ignore and discard any frame that has a type that
- is unknown.
-
-
-
-
- An 8-bit field reserved for frame-type specific boolean flags.
-
-
- Flags are assigned semantics specific to the indicated frame type. Flags that have
- no defined semantics for a particular frame type MUST be ignored, and MUST be left
- unset (0) when sending.
-
-
-
-
- A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
- remain unset (0) when sending and MUST be ignored when receiving.
-
-
-
-
- A 31-bit stream identifier (see ). The value 0 is
- reserved for frames that are associated with the connection as a whole as opposed to
- an individual stream.
-
-
-
-
-
- The structure and content of the frame payload is dependent entirely on the frame type.
-
-
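The fixed 9-octet header described above can be decoded with simple bit shifting. The sketch below is an illustration under those stated field widths; the type and field names are assumptions, not this package's frame parser.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    type frameHeader struct {
        Length   uint32 // 24-bit payload length, not counting the 9-octet header
        Type     uint8  // frame type
        Flags    uint8  // frame-type specific flags
        StreamID uint32 // 31-bit stream identifier, reserved bit masked off
    }

    func parseFrameHeader(buf [9]byte) frameHeader {
        return frameHeader{
            Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
            Type:     buf[3],
            Flags:    buf[4],
            StreamID: binary.BigEndian.Uint32(buf[5:9]) & 0x7fffffff,
        }
    }

    func main() {
        // An empty SETTINGS frame header: length 0, type 0x4, no flags, stream 0.
        fmt.Printf("%+v\n", parseFrameHeader([9]byte{0, 0, 0, 0x4, 0, 0, 0, 0, 0}))
    }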
-
-
-
- The size of a frame payload is limited by the maximum size that a receiver advertises in
- the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value
- between 2^14 (16,384) and 2^24-1 (16,777,215) octets,
- inclusive.
-
-
- All implementations MUST be capable of receiving and minimally processing frames up to
- 2^14 octets in length, plus the 9-octet frame
- header. The size of the frame header is not included when describing frame sizes.
-
-
- Certain frame types, such as PING, impose additional limits
- on the amount of payload data allowed.
-
-
-
-
- If a frame size exceeds any defined limit, or is too small to contain mandatory frame
- data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error
- in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying
- a header block (that is, HEADERS,
- PUSH_PROMISE, and CONTINUATION), SETTINGS,
- and any WINDOW_UPDATE frame with a stream identifier of 0.
-
-
- Endpoints are not obligated to use all available space in a frame. Responsiveness can be
- improved by using frames that are smaller than the permitted maximum size. Sending large
- frames can result in delays in sending time-sensitive frames (such as
- RST_STREAM, WINDOW_UPDATE, or PRIORITY),
- which, if blocked by the transmission of a large frame, could affect performance.
-
-
-
-
-
- Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
- They are used within HTTP request and response messages as well as server push operations
- (see ).
-
-
- Header lists are collections of zero or more header fields. When transmitted over a
- connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then
- divided into one or more octet sequences, called header block fragments, and transmitted
- within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames.
-
-
- The Cookie header field is treated specially by the HTTP
- mapping (see ).
-
-
- A receiving endpoint reassembles the header block by concatenating its fragments, then
- decompresses the block to reconstruct the header list.
-
-
- A complete header block consists of either:
-
-
- a single HEADERS or PUSH_PROMISE frame,
- with the END_HEADERS flag set, or
-
-
- a HEADERS or PUSH_PROMISE frame with the END_HEADERS
- flag cleared and one or more CONTINUATION frames,
- where the last CONTINUATION frame has the END_HEADERS flag set.
-
-
-
-
- Header compression is stateful. One compression context and one decompression context is
- used for the entire connection. Each header block is processed as a discrete unit.
- Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
- frames of any other type or from any other stream. The last frame in a sequence of
- HEADERS or CONTINUATION frames MUST have the END_HEADERS
- flag set. The last frame in a sequence of PUSH_PROMISE or
- CONTINUATION frames MUST have the END_HEADERS flag set. This allows a
- header block to be logically equivalent to a single frame.
-
-
- Header block fragments can only be sent as the payload of HEADERS,
- PUSH_PROMISE or CONTINUATION frames, because these frames
- carry data that can modify the compression context maintained by a receiver. An endpoint
- receiving HEADERS, PUSH_PROMISE or
- CONTINUATION frames MUST reassemble header blocks and perform decompression
- even if the frames are to be discarded. A receiver MUST terminate the connection with a
- connection error of type
- COMPRESSION_ERROR if it does not decompress a header block.
-
-
-
-
-
-
- A "stream" is an independent, bi-directional sequence of frames exchanged between the client
- and server within an HTTP/2 connection. Streams have several important characteristics:
-
-
- A single HTTP/2 connection can contain multiple concurrently open streams, with either
- endpoint interleaving frames from multiple streams.
-
-
- Streams can be established and used unilaterally or shared by either the client or
- server.
-
-
- Streams can be closed by either endpoint.
-
-
- The order in which frames are sent on a stream is significant. Recipients process frames
- in the order they are received. In particular, the order of HEADERS,
- and DATA frames is semantically significant.
-
-
- Streams are identified by an integer. Stream identifiers are assigned to streams by the
- endpoint initiating the stream.
-
-
-
-
-
-
- The lifecycle of a stream is shown in .
-
-
-
-
- [figure: stream state diagram showing transitions between the idle,
-  reserved (local), reserved (remote), open, half closed (local),
-  half closed (remote), and closed states]
-
- H: HEADERS frame (with implied CONTINUATIONs)
- PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
- ES: END_STREAM flag
- R: RST_STREAM frame
-]]>
-
-
-
-
- Note that this diagram shows stream state transitions and the frames and flags that affect
- those transitions only. In this regard, CONTINUATION frames do not result
- in state transitions; they are effectively part of the HEADERS or
- PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is
- processed as a separate event to the frame that bears it; a HEADERS frame
- with the END_STREAM flag set can cause two state transitions.
-
-
- Both endpoints have a subjective view of the state of a stream that could be different
- when frames are in transit. Endpoints do not coordinate the creation of streams; they are
- created unilaterally by either endpoint. The negative consequences of a mismatch in
- states are limited to the "closed" state after sending RST_STREAM, where
- frames might be received for some time after closing.
-
-
- Streams have the following states:
-
-
-
-
-
- All streams start in the "idle" state. In this state, no frames have been
- exchanged.
-
-
- The following transitions are valid from this state:
-
-
- Sending or receiving a HEADERS frame causes the stream to become
- "open". The stream identifier is selected as described in . The same HEADERS frame can also
- cause a stream to immediately become "half closed".
-
-
- Sending a PUSH_PROMISE frame marks the associated stream for
- later use. The stream state for the reserved stream transitions to "reserved
- (local)".
-
-
- Receiving a PUSH_PROMISE frame marks the associated stream as
- reserved by the remote peer. The state of the stream becomes "reserved
- (remote)".
-
-
-
-
- Receiving any frames other than HEADERS or
- PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "reserved (local)" state is one that has been promised by sending a
- PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an
- idle stream by associating the stream with an open stream that was initiated by the
- remote peer (see ).
-
-
- In this state, only the following transitions are possible:
-
-
- The endpoint can send a HEADERS frame. This causes the stream to
- open in a "half closed (remote)" state.
-
-
- Either endpoint can send a RST_STREAM frame to cause the stream
- to become "closed". This releases the stream reservation.
-
-
-
-
- An endpoint MUST NOT send any type of frame other than HEADERS or
- RST_STREAM in this state.
-
-
- A PRIORITY frame MAY be received in this state. Receiving any type
- of frame other than RST_STREAM or PRIORITY on a stream
- in this state MUST be treated as a connection
- error of type PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "reserved (remote)" state has been reserved by a remote peer.
-
-
- In this state, only the following transitions are possible:
-
-
- Receiving a HEADERS frame causes the stream to transition to
- "half closed (local)".
-
-
- Either endpoint can send a RST_STREAM frame to cause the stream
- to become "closed". This releases the stream reservation.
-
-
-
-
- An endpoint MAY send a PRIORITY frame in this state to reprioritize
- the reserved stream. An endpoint MUST NOT send any type of frame other than
- RST_STREAM, WINDOW_UPDATE, or PRIORITY
- in this state.
-
-
- Receiving any type of frame other than HEADERS or
- RST_STREAM on a stream in this state MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "open" state may be used by both peers to send frames of any type.
- In this state, sending peers observe advertised stream
- level flow control limits.
-
-
- From this state either endpoint can send a frame with an END_STREAM flag set, which
- causes the stream to transition into one of the "half closed" states: an endpoint
- sending an END_STREAM flag causes the stream state to become "half closed (local)";
- an endpoint receiving an END_STREAM flag causes the stream state to become "half
- closed (remote)".
-
-
- Either endpoint can send a RST_STREAM frame from this state, causing
- it to transition immediately to "closed".
-
-
-
-
-
-
- A stream that is in the "half closed (local)" state cannot be used for sending
- frames. Only WINDOW_UPDATE, PRIORITY and
- RST_STREAM frames can be sent in this state.
-
-
- A stream transitions from this state to "closed" when a frame that contains an
- END_STREAM flag is received, or when either peer sends a RST_STREAM
- frame.
-
-
- A receiver can ignore WINDOW_UPDATE frames in this state, which might
- arrive for a short period after a frame bearing the END_STREAM flag is sent.
-
-
- PRIORITY frames received in this state are used to reprioritize
- streams that depend on the current stream.
-
-
-
-
-
-
- A stream that is "half closed (remote)" is no longer being used by the peer to send
- frames. In this state, an endpoint is no longer obligated to maintain a receiver
- flow control window if it performs flow control.
-
-
- If an endpoint receives additional frames for a stream that is in this state, other
- than WINDOW_UPDATE, PRIORITY or
- RST_STREAM, it MUST respond with a stream error of type
- STREAM_CLOSED.
-
-
- A stream that is "half closed (remote)" can be used by the endpoint to send frames
- of any type. In this state, the endpoint continues to observe advertised stream level flow control limits.
-
-
- A stream can transition from this state to "closed" by sending a frame that contains
- an END_STREAM flag, or when either peer sends a RST_STREAM frame.
-
-
-
-
-
-
- The "closed" state is the terminal state.
-
-
- An endpoint MUST NOT send frames other than PRIORITY on a closed
- stream. An endpoint that receives any frame other than PRIORITY
- after receiving a RST_STREAM MUST treat that as a stream error of type
- STREAM_CLOSED. Similarly, an endpoint that receives any frames after
- receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type
- STREAM_CLOSED, unless the frame is permitted as described below.
-
-
- WINDOW_UPDATE or RST_STREAM frames can be received in
- this state for a short period after a DATA or HEADERS
- frame containing an END_STREAM flag is sent. Until the remote peer receives and
- processes RST_STREAM or the frame bearing the END_STREAM flag, it
- might send frames of these types. Endpoints MUST ignore
- WINDOW_UPDATE or RST_STREAM frames received in this
- state, though endpoints MAY choose to treat frames that arrive a significant time
- after sending END_STREAM as a connection
- error of type PROTOCOL_ERROR.
-
-
- PRIORITY frames can be sent on closed streams to prioritize streams
- that are dependent on the closed stream. Endpoints SHOULD process
- PRIORITY frames, though they can be ignored if the stream has been
- removed from the dependency tree (see ).
-
-
- If this state is reached as a result of sending a RST_STREAM frame,
- the peer that receives the RST_STREAM might have already sent - or
- enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint
- MUST ignore frames that it receives on closed streams after it has sent a
- RST_STREAM frame. An endpoint MAY choose to limit the period over
- which it ignores frames and treat frames that arrive after this time as being in
- error.
-
-
- Flow controlled frames (i.e., DATA) received after sending
- RST_STREAM are counted toward the connection flow control window.
- Even though these frames might be ignored, because they are sent before the sender
- receives the RST_STREAM, the sender will consider the frames to count
- against the flow control window.
-
-
- An endpoint might receive a PUSH_PROMISE frame after it sends
- RST_STREAM. PUSH_PROMISE causes a stream to become
- "reserved" even if the associated stream has been reset. Therefore, a
- RST_STREAM is needed to close an unwanted promised stream.
-
-
-
-
-
- In the absence of more specific guidance elsewhere in this document, implementations
- SHOULD treat the receipt of a frame that is not expressly permitted in the description of
- a state as a connection error of type
- PROTOCOL_ERROR. Frames of unknown types are ignored.
-
-
- An example of the state transitions for an HTTP request/response exchange can be found in
- . An example of the state transitions for server push can be
- found in and .
-
-
-
-
- Streams are identified with an unsigned 31-bit integer. Streams initiated by a client
- MUST use odd-numbered stream identifiers; those initiated by the server MUST use
- even-numbered stream identifiers. A stream identifier of zero (0x0) is used for
- connection control messages; the stream identifier zero cannot be used to establish a
- new stream.
-
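A toy Go sketch of those identifier rules, assuming a simple per-endpoint allocator; the type and method names are hypothetical.

    package main

    import "fmt"

    // streamIDAllocator hands out identifiers: odd for the client, even for the
    // server, zero reserved for the connection, each larger than the last.
    type streamIDAllocator struct {
        next uint32
    }

    func newAllocator(client bool) *streamIDAllocator {
        if client {
            return &streamIDAllocator{next: 1} // odd identifiers
        }
        return &streamIDAllocator{next: 2} // even identifiers
    }

    // allocate returns the next identifier, or false once the 31-bit space is
    // exhausted, in which case a new connection is needed for new streams.
    func (a *streamIDAllocator) allocate() (uint32, bool) {
        if a.next > 0x7fffffff {
            return 0, false
        }
        id := a.next
        a.next += 2
        return id, true
    }

    func main() {
        c := newAllocator(true)
        for i := 0; i < 3; i++ {
            id, _ := c.allocate()
            fmt.Println(id) // 1, 3, 5
        }
    }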
-
- HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are
- responded to with a stream identifier of one (0x1). After the upgrade
- completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1
- cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
-
-
- The identifier of a newly established stream MUST be numerically greater than all
- streams that the initiating endpoint has opened or reserved. This governs streams that
- are opened using a HEADERS frame and streams that are reserved using
- PUSH_PROMISE. An endpoint that receives an unexpected stream identifier
- MUST respond with a connection error of
- type PROTOCOL_ERROR.
-
-
- The first use of a new stream identifier implicitly closes all streams in the "idle"
- state that might have been initiated by that peer with a lower-valued stream identifier.
- For example, if a client sends a HEADERS frame on stream 7 without ever
- sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
- first frame for stream 7 is sent or received.
-
-
- Stream identifiers cannot be reused. Long-lived connections can result in an endpoint
- exhausting the available range of stream identifiers. A client that is unable to
- establish a new stream identifier can establish a new connection for new streams. A
- server that is unable to establish a new stream identifier can send a
- GOAWAY frame so that the client is forced to open a new connection for
- new streams.
-
-
-
-
-
- A peer can limit the number of concurrently active streams using the
- SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent
- streams setting is specific to each endpoint and applies only to the peer that receives
- the setting. That is, clients specify the maximum number of concurrent streams the
- server can initiate, and servers specify the maximum number of concurrent streams the
- client can initiate.
-
-
- Streams that are in the "open" state, or either of the "half closed" states count toward
- the maximum number of streams that an endpoint is permitted to open. Streams in any of
- these three states count toward the limit advertised in the
- SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the
- "reserved" states do not count toward the stream limit.
-
-
- Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
- HEADERS frame that causes their advertised concurrent stream limit to be
- exceeded MUST treat this as a stream error. An
- endpoint that wishes to reduce the value of
- SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current
- number of open streams can either close streams that exceed the new value or allow
- streams to complete.
-
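As an illustration of that accounting, the sketch below counts only "open" and "half closed" streams against a peer's advertised SETTINGS_MAX_CONCURRENT_STREAMS; the state enumeration is an assumption made for the example.

    package main

    import "fmt"

    type streamState int

    const (
        stateIdle streamState = iota
        stateReservedLocal
        stateReservedRemote
        stateOpen
        stateHalfClosedLocal
        stateHalfClosedRemote
        stateClosed
    )

    // countsTowardLimit reports whether a stream in the given state counts
    // against the concurrent stream limit; reserved streams do not.
    func countsTowardLimit(s streamState) bool {
        switch s {
        case stateOpen, stateHalfClosedLocal, stateHalfClosedRemote:
            return true
        }
        return false
    }

    func canOpenNewStream(states map[uint32]streamState, maxConcurrent uint32) bool {
        var active uint32
        for _, s := range states {
            if countsTowardLimit(s) {
                active++
            }
        }
        return active < maxConcurrent
    }

    func main() {
        states := map[uint32]streamState{1: stateOpen, 3: stateHalfClosedRemote, 2: stateReservedLocal}
        fmt.Println(canOpenNewStream(states, 2)) // false: two streams already count
    }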
-
-
-
-
-
- Using streams for multiplexing introduces contention over use of the TCP connection,
- resulting in blocked streams. A flow control scheme ensures that streams on the same
- connection do not destructively interfere with each other. Flow control is used for both
- individual streams and for the connection as a whole.
-
-
- HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame.
-
-
-
-
- HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
- used without requiring protocol changes. Flow control in HTTP/2 has the following
- characteristics:
-
-
- Flow control is specific to a connection; i.e., it is "hop-by-hop", not
- "end-to-end".
-
-
- Flow control is based on window update frames. Receivers advertise how many octets
- they are prepared to receive on a stream and for the entire connection. This is a
- credit-based scheme.
-
-
- Flow control is directional with overall control provided by the receiver. A
- receiver MAY choose to set any window size that it desires for each stream and for
- the entire connection. A sender MUST respect flow control limits imposed by a
- receiver. Clients, servers and intermediaries all independently advertise their
- flow control window as a receiver and abide by the flow control limits set by
- their peer when sending.
-
-
- The initial value for the flow control window is 65,535 octets for both new streams
- and the overall connection.
-
-
- The frame type determines whether flow control applies to a frame. Of the frames
- specified in this document, only DATA frames are subject to flow
- control; all other frame types do not consume space in the advertised flow control
- window. This ensures that important control frames are not blocked by flow control.
-
-
- Flow control cannot be disabled.
-
-
- HTTP/2 defines only the format and semantics of the WINDOW_UPDATE
- frame (). This document does not stipulate how a
- receiver decides when to send this frame or the value that it sends, nor does it
- specify how a sender chooses to send packets. Implementations are able to select
- any algorithm that suits their needs.
-
-
-
-
- Implementations are also responsible for managing how requests and responses are sent
- based on priority; choosing how to avoid head of line blocking for requests; and
- managing the creation of new streams. Algorithm choices for these could interact with
- any flow control algorithm.
-
-
-
-
-
- Flow control is defined to protect endpoints that are operating under resource
- constraints. For example, a proxy needs to share memory between many connections, and
- also might have a slow upstream connection and a fast downstream one. Flow control
- addresses cases where the receiver is unable to process data on one stream, yet wants to
- continue to process other streams in the same connection.
-
-
- Deployments that do not require this capability can advertise a flow control window of
- the maximum size, incrementing the available space when new data is received. This
- effectively disables flow control for that receiver. Conversely, a sender is always
- subject to the flow control window advertised by the receiver.
-
-
- Deployments with constrained resources (for example, memory) can employ flow control to
- limit the amount of memory a peer can consume. Note, however, that this can lead to
- suboptimal use of available network resources if flow control is enabled without
- knowledge of the bandwidth-delay product (see ).
-
-
- Even with full awareness of the current bandwidth-delay product, implementation of flow
- control can be difficult. When using flow control, the receiver MUST read from the TCP
- receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
- critical frames, such as WINDOW_UPDATE, are not read and acted upon.
-
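A simplified sketch of receiver-side accounting consistent with the description above: the window starts at 65,535 octets, received DATA consumes it, and the increments the receiver advertises in WINDOW_UPDATE frames restore it. The type and method names are illustrative only.

    package main

    import (
        "errors"
        "fmt"
    )

    type recvWindow struct {
        available int64
    }

    func newRecvWindow() *recvWindow { return &recvWindow{available: 65535} }

    // onData is called for every flow-controlled DATA frame received.
    func (w *recvWindow) onData(n int64) error {
        if n > w.available {
            return errors.New("FLOW_CONTROL_ERROR: peer exceeded advertised window")
        }
        w.available -= n
        return nil
    }

    // release is called once the application has consumed n octets; the return
    // value is the increment to advertise in a WINDOW_UPDATE frame.
    func (w *recvWindow) release(n int64) int64 {
        w.available += n
        return n
    }

    func main() {
        w := newRecvWindow()
        fmt.Println(w.onData(1024), w.available)  // <nil> 64511
        fmt.Println(w.release(1024), w.available) // 1024 65535
    }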
-
-
-
-
-
- A client can assign a priority for a new stream by including prioritization information in
- the HEADERS frame that opens the stream. For an existing
- stream, the PRIORITY frame can be used to change the
- priority.
-
-
- The purpose of prioritization is to allow an endpoint to express how it would prefer its
- peer allocate resources when managing concurrent streams. Most importantly, priority can
- be used to select streams for transmitting frames when there is limited capacity for
- sending.
-
-
- Streams can be prioritized by marking them as dependent on the completion of other streams
- (). Each dependency is assigned a relative weight, a number
- that is used to determine the relative proportion of available resources that are assigned
- to streams dependent on the same stream.
-
-
-
- Explicitly setting the priority for a stream is input to a prioritization process. It
- does not guarantee any particular processing or transmission order for the stream relative
- to any other stream. An endpoint cannot force a peer to process concurrent streams in a
- particular order using priority. Expressing priority is therefore only ever a suggestion.
-
-
- Providing prioritization information is optional, so default values are used if no
- explicit indicator is provided ().
-
-
-
-
- Each stream can be given an explicit dependency on another stream. Including a
- dependency expresses a preference to allocate resources to the identified stream rather
- than to the dependent stream.
-
-
- A stream that is not dependent on any other stream is given a stream dependency of 0x0.
- In other words, the non-existent stream 0 forms the root of the tree.
-
-
- A stream that depends on another stream is a dependent stream. The stream upon which a
- stream is dependent is a parent stream. A dependency on a stream that is not currently
- in the tree - such as a stream in the "idle" state - results in that stream being given
- a default priority.
-
-
- When assigning a dependency on another stream, the stream is added as a new dependency
- of the parent stream. Dependent streams that share the same parent are not ordered with
- respect to each other. For example, if streams B and C are dependent on stream A, and
- if stream D is created with a dependency on stream A, this results in a dependency order
- of A followed by B, C, and D in any order.
-
-
-    A                 A
-   / \               /|\
-  B   C             B D C
-]]>
-
-
- An exclusive flag allows for the insertion of a new level of dependencies. The
- exclusive flag causes the stream to become the sole dependency of its parent stream,
- causing other dependencies to become dependent on the exclusive stream. In the
- previous example, if stream D is created with an exclusive dependency on stream A, this
- results in D becoming the dependency parent of B and C.
-
-
-    A                 A
-   / \                |
-  B   C               D
-                     / \
-                    B   C
-]]>
-
-
- Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
- of the streams that it depends on (the chain of parent streams up to 0x0) are either
- closed, or it is not possible to make progress on them.
-
-
- A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR.
-
-
-
-
-
- All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
-
-
- Streams with the same parent SHOULD be allocated resources proportionally based on their
- weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A
- with weight 12, and if no progress can be made on A, stream B ideally receives one third
- of the resources allocated to stream C.
-
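A minimal sketch of that proportional rule (the helper name is hypothetical): with sibling weights 4 and 12, the lighter stream gets one quarter of the parent's resources, which is one third of what its sibling receives.

    package main

    import "fmt"

    // share returns each sibling's fraction of the resources available to their
    // common parent, proportional to the siblings' weights (1 to 256).
    func share(weights map[uint32]int) map[uint32]float64 {
        total := 0
        for _, w := range weights {
            total += w
        }
        out := make(map[uint32]float64, len(weights))
        for id, w := range weights {
            out[id] = float64(w) / float64(total)
        }
        return out
    }

    func main() {
        fmt.Println(share(map[uint32]int{3: 4, 5: 12})) // stream 3: 0.25, stream 5: 0.75
    }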
-
-
-
-
- Stream priorities are changed using the PRIORITY frame. Setting a
- dependency causes a stream to become dependent on the identified parent stream.
-
-
- Dependent streams move with their parent stream if the parent is reprioritized. Setting
- a dependency with the exclusive flag for a reprioritized stream moves all the
- dependencies of the new parent stream to become dependent on the reprioritized stream.
-
-
- If a stream is made dependent on one of its own dependencies, the formerly dependent
- stream is first moved to be dependent on the reprioritized stream's previous parent.
- The moved dependency retains its weight.
-
-
-
- For example, consider an original dependency tree where B and C depend on A, D and E
- depend on C, and F depends on D. If A is made dependent on D, then D takes the place
- of A. All other dependency relationships stay the same, except for F, which becomes
- dependent on A if the reprioritization is exclusive.
-
-  [figure: dependency tree reprioritization example showing the original tree,
-   an intermediate step, and the resulting trees for the non-exclusive and
-   exclusive cases]
-]]>
-
-
-
-
-
- When a stream is removed from the dependency tree, its dependencies can be moved to
- become dependent on the parent of the closed stream. The weights of new dependencies
- are recalculated by distributing the weight of the dependency of the closed stream
- proportionally based on the weights of its dependencies.
-
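An illustrative sketch of that redistribution, assuming the closed stream's weight is split among its former dependents in proportion to their own weights; the helper is hypothetical.

    package main

    import "fmt"

    // redistribute divides a closed stream's weight among its dependents in
    // proportion to the dependents' own weights.
    func redistribute(closedWeight int, childWeights map[uint32]int) map[uint32]float64 {
        total := 0
        for _, w := range childWeights {
            total += w
        }
        out := make(map[uint32]float64, len(childWeights))
        for id, w := range childWeights {
            out[id] = float64(closedWeight) * float64(w) / float64(total)
        }
        return out
    }

    func main() {
        // A closed stream of weight 16 with two equally weighted dependents.
        fmt.Println(redistribute(16, map[uint32]int{5: 16, 7: 16})) // each gets 8
    }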
-
- Streams that are removed from the dependency tree cause some prioritization information
- to be lost. Resources are shared between streams with the same parent stream, which
- means that if a stream in that set closes or becomes blocked, any spare capacity
- allocated to a stream is distributed to the immediate neighbors of the stream. However,
- if the common dependency is removed from the tree, those streams share resources with
- streams at the next highest level.
-
-
- For example, assume streams A and B share a parent, and streams C and D both depend on
- stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
- then stream C receives all the resources dedicated to stream A. If stream A is removed
- from the tree, the weight of stream A is divided between streams C and D. If stream D
- is still unable to proceed, this results in stream C receiving a reduced proportion of
- resources. For equal starting weights, C receives one third, rather than one half, of
- available resources.
-
-
- It is possible for a stream to become closed while prioritization information that
- creates a dependency on that stream is in transit. If a stream identified in a
- dependency has no associated priority information, then the dependent stream is instead
- assigned a default priority. This potentially creates
- suboptimal prioritization, since the stream could be given a priority that is different
- to what is intended.
-
-
- To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
- period after streams become closed. The longer state is retained, the lower the chance
- that streams are assigned incorrect or default priority values.
-
-
- This could create a large state burden for an endpoint, so this state MAY be limited.
- An endpoint MAY apply a fixed upper limit on the number of closed streams for which
- prioritization state is tracked to limit state exposure. The amount of additional state
- an endpoint maintains could be dependent on load; under high load, prioritization state
- can be discarded to limit resource commitments. In extreme cases, an endpoint could
- even discard prioritization state for active or reserved streams. If a fixed limit is
- applied, endpoints SHOULD maintain state for at least as many streams as allowed by
- their setting for SETTINGS_MAX_CONCURRENT_STREAMS.
-
-
- An endpoint receiving a PRIORITY frame that changes the priority of a
- closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
- retained enough state to do so.
-
-
-
-
-
- Providing priority information is optional. Streams are assigned a non-exclusive
- dependency on stream 0x0 by default. Pushed streams
- initially depend on their associated stream. In both cases, streams are assigned a
- default weight of 16.
-
-
-
-
-
-
- HTTP/2 framing permits two classes of error:
-
-
- An error condition that renders the entire connection unusable is a connection error.
-
-
- An error in an individual stream is a stream error.
-
-
-
-
- A list of error codes is included in .
-
-
-
-
- A connection error is any error which prevents further processing of the framing layer,
- or which corrupts any connection state.
-
-
- An endpoint that encounters a connection error SHOULD first send a GOAWAY
- frame () with the stream identifier of the last stream that it
- successfully received from its peer. The GOAWAY frame includes an error
- code that indicates why the connection is terminating. After sending the
- GOAWAY frame, the endpoint MUST close the TCP connection.
-
-
- It is possible that the GOAWAY will not be reliably received by the
- receiving endpoint (see ). In the event of a connection error,
- GOAWAY only provides a best effort attempt to communicate with the peer
- about why the connection is being terminated.
-
-
- An endpoint can end a connection at any time. In particular, an endpoint MAY choose to
- treat a stream error as a connection error. Endpoints SHOULD send a
- GOAWAY frame when ending a connection, providing that circumstances
- permit it.
-
-
-
-
-
- A stream error is an error related to a specific stream that does not affect processing
- of other streams.
-
-
- An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error
- occurred. The RST_STREAM frame includes an error code that indicates the
- type of error.
-
-
- A RST_STREAM is the last frame that an endpoint can send on a stream.
- The peer that sends the RST_STREAM frame MUST be prepared to receive any
- frames that were sent or enqueued for sending by the remote peer. These frames can be
- ignored, except where they modify connection state (such as the state maintained for
- header compression, or flow control).
-
-
- Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for
- any stream. However, an endpoint MAY send additional RST_STREAM frames if
- it receives frames on a closed stream after more than a round-trip time. This behavior
- is permitted to deal with misbehaving implementations.
-
-
- An endpoint MUST NOT send a RST_STREAM in response to an
- RST_STREAM frame, to avoid looping.
-
-
-
-
-
- If the TCP connection is closed or reset while streams remain in open or half closed
- states, then the endpoint MUST assume that those streams were abnormally interrupted and
- could be incomplete.
-
-
-
-
-
-
- HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide
- additional services or alter any aspect of the protocol, within the limitations described
- in this section. Extensions are effective only within the scope of a single HTTP/2
- connection.
-
-
- Extensions are permitted to use new frame types, new
- settings, or new error
- codes. Registries are established for managing these extension points: frame types, settings and
- error codes.
-
-
- Implementations MUST ignore unknown or unsupported values in all extensible protocol
- elements. Implementations MUST discard frames that have unknown or unsupported types.
- This means that any of these extension points can be safely used by extensions without
- prior arrangement or negotiation. However, extension frames that appear in the middle of
- a header block are not permitted; these MUST be treated
- as a connection error of type
- PROTOCOL_ERROR.
-
-
- However, extensions that could change the semantics of existing protocol components MUST
- be negotiated before being used. For example, an extension that changes the layout of the
- HEADERS frame cannot be used until the peer has given a positive signal
- that this is acceptable. In this case, it could also be necessary to coordinate when the
- revised layout comes into effect. Note that treating any frame other than
- DATA frames as flow controlled is such a change in semantics, and can only
- be done through negotiation.
-
-
- This document doesn't mandate a specific method for negotiating the use of an extension,
- but notes that a setting could be used for that
- purpose. If both peers set a value that indicates willingness to use the extension, then
- the extension can be used. If a setting is used for extension negotiation, the initial
- value MUST be defined so that the extension is initially disabled.
-
-
-
-
-
-
- This specification defines a number of frame types, each identified by a unique 8-bit type
- code. Each frame type serves a distinct purpose either in the establishment and management
- of the connection as a whole, or of individual streams.
-
-
- The transmission of specific frame types can alter the state of a connection. If endpoints
- fail to maintain a synchronized view of the connection state, successful communication
- within the connection will no longer be possible. Therefore, it is important that endpoints
- have a shared comprehension of how the state is affected by the use of any given frame.
-
-
-
-
- DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
- with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
- response payloads.
-
-
- DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to
- obscure the size of messages.
-
-
-
-
-
- The DATA frame contains the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is optional and is only present if the PADDED flag is set.
-
-
- Application data. The amount of data is the remainder of the frame payload after
- subtracting the length of the other fields that are present.
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The DATA frame defines the following flags:
-
-
- Bit 1 being set indicates that this frame is the last that the endpoint will send for
- the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state.
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it describes
- is present.
-
-
-
-
- DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
- identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
- DATA frames are subject to flow control and can only be sent when a stream is in the
- "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
- control, including Pad Length and Padding fields if present. If a DATA frame is received
- whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
- with a stream error of type
- STREAM_CLOSED.
-
-
- The total number of padding octets is determined by the value of the Pad Length field. If
- the length of the padding is greater than the length of the frame payload, the recipient
- MUST treat this as a connection error of
- type PROTOCOL_ERROR.
-
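A hedged sketch of enforcing that rule when stripping padding from a DATA frame payload with the PADDED flag set; the helper name and error strings are illustrative.

    package main

    import (
        "errors"
        "fmt"
    )

    // dataPayload returns the application data from a DATA frame payload,
    // removing the Pad Length octet and trailing padding when padded is true.
    func dataPayload(payload []byte, padded bool) ([]byte, error) {
        if !padded {
            return payload, nil
        }
        if len(payload) == 0 {
            return nil, errors.New("PROTOCOL_ERROR: missing Pad Length field")
        }
        padLen := int(payload[0])
        body := payload[1:]
        if padLen > len(body) {
            return nil, errors.New("PROTOCOL_ERROR: padding exceeds frame payload")
        }
        return body[:len(body)-padLen], nil
    }

    func main() {
        out, err := dataPayload([]byte{2, 'h', 'i', 0, 0}, true)
        fmt.Printf("%q %v\n", out, err) // "hi" <nil>
    }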
-
- A frame can be increased in size by one octet by including a Pad Length field with a
- value of zero.
-
-
-
-
- Padding is a security feature; see .
-
-
-
-
-
- The HEADERS frame (type=0x1) is used to open a stream,
- and additionally carries a header block fragment. HEADERS frames can be sent on a stream
- in the "open" or "half closed (remote)" states.
-
-
-
-
-
- The HEADERS frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
-
-
- An 8-bit weight for the stream, see . Add one to the
- value to obtain a weight between 1 and 256. This field is only present if the
- PRIORITY flag is set.
-
-
- A header block fragment.
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The HEADERS frame defines the following flags:
-
-
-
- Bit 1 being set indicates that the header block is
- the last that the endpoint will send for the identified stream. Setting this flag
- causes the stream to enter one of "half closed"
- states.
-
-
- A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
- However, a HEADERS frame with the END_STREAM flag set can be followed by
- CONTINUATION frames on the same stream. Logically, the
- CONTINUATION frames are part of the HEADERS frame.
-
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A HEADERS frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the
- receipt of any other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
-
-
-
-
- Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
- fields are present; see .
-
-
-
-
-
-
- The payload of a HEADERS frame contains a header block
- fragment. A header block that does not fit within a HEADERS frame is continued in
- a CONTINUATION frame.
-
-
-
- HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
- stream identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
- The HEADERS frame changes the connection state as described in .
-
-
-
- The HEADERS frame includes optional padding. Padding fields and flags are identical to
- those defined for DATA frames.
-
-
- Prioritization information in a HEADERS frame is logically equivalent to a separate
- PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
- stream prioritization when new streams are created. Prioritization fields in HEADERS frames
- subsequent to the first on a stream reprioritize the
- stream.
-
-
-
-
-
- The PRIORITY frame (type=0x2) specifies the sender-advised
- priority of a stream. It can be sent at any time for an existing stream, including
- closed streams. This enables reprioritization of existing streams.
-
-
-
-
-
- The payload of a PRIORITY frame contains the following fields:
-
-
- A single bit flag indicates that the stream dependency is exclusive, see .
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see .
-
-
- An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256.
-
-
-
-
-
- The PRIORITY frame does not define any flags.
-
-
-
- The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
- with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
- The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
- "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
- sent between consecutive frames that comprise a single header
- block. Note that this frame could arrive after processing or frame sending has
- completed, which would cause it to have no effect on the current stream. For a stream
- that is in the "half closed (remote)" or "closed" state, this frame can only affect
- processing of the current stream and not frame transmission.
-
-
- The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
- This allows for the reprioritization of a group of dependent streams by altering the
- priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
- closed stream risks being ignored due to the peer having discarded priority state
- information for that stream.
-
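- As an illustrative sketch (not part of this specification), the 5-octet payload described
- above can be packed with the exclusive flag in the high bit of the 4-octet stream
- dependency field, followed by one weight octet carrying the weight minus one. The helper
- name is an assumption of the sketch.
-
-   package sketch
-
-   import "encoding/binary"
-
-   // encodePriority packs the PRIORITY payload: E flag, 31-bit stream
-   // dependency, and an 8-bit weight transmitted as (weight - 1).
-   func encodePriority(exclusive bool, streamDependency uint32, weight int) [5]byte {
-       var p [5]byte
-       dep := streamDependency & 0x7fffffff
-       if exclusive {
-           dep |= 1 << 31
-       }
-       binary.BigEndian.PutUint32(p[0:4], dep)
-       p[4] = byte(weight - 1) // e.g. a weight of 256 is sent as 255
-       return p
-   }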
-
-
-
-
- The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by
- the initiator of a stream, it indicates that they wish to cancel the stream or that an
- error condition has occurred. When sent by the receiver of a stream, it indicates that
- either the receiver is rejecting the stream, requesting that the stream be cancelled, or
- that an error condition has occurred.
-
-
-
-
-
-
- The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being
- terminated.
-
-
-
- The RST_STREAM frame does not define any flags.
-
-
-
- The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
- closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
- additional frames for that stream, with the exception of PRIORITY. However,
- after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
- additional frames sent on the stream that might have been sent by the peer prior to the
- arrival of the RST_STREAM.
-
-
-
- RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received
- with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR.
-
-
-
- RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM
- frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
- communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
- also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
- parameter can also be referred to as a "setting".
-
-
- SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
- which are used by the receiving peer. Different values for the same parameter can be
- advertised by each peer. For example, a client might set a high initial flow control
- window, whereas a server might set a lower value to conserve resources.
-
-
-
- A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
- sent at any other time by either endpoint over the lifetime of the connection.
- Implementations MUST support all of the parameters defined by this specification.
-
-
-
- Each parameter in a SETTINGS frame replaces any existing value for that parameter.
- Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
- frame does not need to maintain any state other than the current value of its
- parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
- a receiver.
-
-
- SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
- frame defines the following flag:
-
-
- Bit 1 being set indicates that this frame acknowledges receipt and application of the
- peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
- be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
- other than 0 MUST be treated as a connection
- error of type FRAME_SIZE_ERROR. For more information, see Settings Synchronization.
-
-
-
-
- SETTINGS frames always apply to a connection, never a single stream. The stream
- identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
- frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
- with a connection error of type
- PROTOCOL_ERROR.
-
-
- The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
- MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
- an unsigned 16-bit setting identifier and an unsigned 32-bit value.
-
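- A minimal sketch of this encoding (illustrative only; the type and function names are
- assumptions of the sketch): each parameter is written as an unsigned 16-bit identifier
- followed by an unsigned 32-bit value, and the payload is the concatenation of zero or
- more such 6-octet units.
-
-   package sketch
-
-   import "encoding/binary"
-
-   // setting is one SETTINGS parameter: a 16-bit identifier and a 32-bit value.
-   type setting struct {
-       ID  uint16
-       Val uint32
-   }
-
-   // encodeSettingsPayload builds the SETTINGS frame payload; the 9-octet
-   // frame header is not included here.
-   func encodeSettingsPayload(params []setting) []byte {
-       payload := make([]byte, 0, 6*len(params))
-       for _, p := range params {
-           var entry [6]byte
-           binary.BigEndian.PutUint16(entry[0:2], p.ID)
-           binary.BigEndian.PutUint32(entry[2:6], p.Val)
-           payload = append(payload, entry[:]...)
-       }
-       return payload
-   }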
-
-
-
-
-
-
-
-
- The following parameters are defined:
-
-
-
- Allows the sender to inform the remote endpoint of the maximum size of the header
- compression table used to decode header blocks, in octets. The encoder can select
- any size equal to or less than this value by using signaling specific to the
- header compression format inside a header block. The initial value is 4,096
- octets.
-
-
-
-
- This setting can be used to disable server
- push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
- receives this parameter set to a value of 0. An endpoint that has both set this
- parameter to 0 and had it acknowledged MUST treat the receipt of a
- PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR.
-
-
- The initial value is 1, which indicates that server push is permitted. Any value
- other than 0 or 1 MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Indicates the maximum number of concurrent streams that the sender will allow.
- This limit is directional: it applies to the number of streams that the sender
- permits the receiver to create. Initially there is no limit to this value. It is
- recommended that this value be no smaller than 100, so as to not unnecessarily
- limit parallelism.
-
-
- A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
- by endpoints. A zero value does prevent the creation of new streams; however, this
- can also happen for any limit that is exhausted with active streams. Servers
- SHOULD only set a zero value for short durations; if a server does not wish to
- accept requests, closing the connection could be preferable.
-
-
-
-
- Indicates the sender's initial window size (in octets) for stream level flow
- control. The initial value is 2^16-1 (65,535) octets.
-
-
- This setting affects the window size of all streams, including existing streams,
- see .
-
-
- Values above the maximum flow control window size of 2^31-1 MUST
- be treated as a connection error of
- type FLOW_CONTROL_ERROR.
-
-
-
-
- Indicates the size of the largest frame payload that the sender is willing to
- receive, in octets.
-
-
- The initial value is 2^14 (16,384) octets. The value advertised by
- an endpoint MUST be between this initial value and the maximum allowed frame size
- (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
- MUST be treated as a connection error
- of type PROTOCOL_ERROR.
-
-
-
-
- This advisory setting informs a peer of the maximum size of header list that the
- sender is prepared to accept, in octets. The value is based on the uncompressed
- size of header fields, including the length of the name and value in octets plus
- an overhead of 32 octets for each header field.
-
-
- For any given request, a lower limit than what is advertised MAY be enforced. The
- initial value of this setting is unlimited.
-
-
-
-
-
- An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
- MUST ignore that setting.
-
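- The value constraints stated for the parameters above can be captured in a small
- validation sketch (illustrative only; the function names are assumptions of the sketch):
-
-   package sketch
-
-   import "fmt"
-
-   // validateEnablePush: only 0 or 1 is allowed for SETTINGS_ENABLE_PUSH.
-   func validateEnablePush(v uint32) error {
-       if v > 1 {
-           return fmt.Errorf("SETTINGS_ENABLE_PUSH = %d: PROTOCOL_ERROR", v)
-       }
-       return nil
-   }
-
-   // validateInitialWindowSize: values above 2^31-1 are a FLOW_CONTROL_ERROR.
-   func validateInitialWindowSize(v uint32) error {
-       if v > 1<<31-1 {
-           return fmt.Errorf("SETTINGS_INITIAL_WINDOW_SIZE = %d: FLOW_CONTROL_ERROR", v)
-       }
-       return nil
-   }
-
-   // validateMaxFrameSize: the advertised value must lie between the initial
-   // value of 2^14 and the maximum allowed frame size of 2^24-1, inclusive.
-   func validateMaxFrameSize(v uint32) error {
-       if v < 1<<14 || v > 1<<24-1 {
-           return fmt.Errorf("SETTINGS_MAX_FRAME_SIZE = %d: PROTOCOL_ERROR", v)
-       }
-       return nil
-   }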
-
-
-
-
- Most values in SETTINGS benefit from or require an understanding of when the peer has
- received and applied the changed parameter values. In order to provide
- such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
- is not set MUST apply the updated parameters as soon as possible upon receipt.
-
-
- The values in the SETTINGS frame MUST be processed in the order they appear, with no
- other frame processing between values. Unsupported parameters MUST be ignored. Once
- all values have been processed, the recipient MUST immediately emit a SETTINGS frame
- with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
- of the altered parameters can rely on the setting having been applied.
-
-
- If the sender of a SETTINGS frame does not receive an acknowledgement within a
- reasonable amount of time, it MAY issue a connection error of type
- SETTINGS_TIMEOUT.
-
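- The receiver side of this exchange can be sketched as follows (illustrative only; the
- param type and the supported and sendAck callbacks are assumptions of the sketch):
- values are applied in the order received, unknown identifiers are ignored, and an empty
- SETTINGS frame with the ACK flag set is emitted once all values have been processed.
-
-   package sketch
-
-   // param mirrors one received SETTINGS parameter.
-   type param struct {
-       ID  uint16
-       Val uint32
-   }
-
-   // applySettings applies parameters in order and then acknowledges them.
-   func applySettings(current map[uint16]uint32, received []param,
-       supported func(uint16) bool, sendAck func() error) error {
-       for _, p := range received {
-           if !supported(p.ID) {
-               continue // unknown or unsupported parameters MUST be ignored
-           }
-           current[p.ID] = p.Val // the last value seen is the one that applies
-       }
-       return sendAck() // empty SETTINGS frame with the ACK flag set
-   }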
-
-
-
-
-
- The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
- streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
- 31-bit identifier of the stream the endpoint plans to create along with a set of headers
- that provide additional context for the stream. contains a
- thorough description of the use of PUSH_PROMISE frames.
-
-
-
-
-
-
- The PUSH_PROMISE frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single reserved bit.
-
-
- An unsigned 31-bit integer that identifies the stream that is reserved by the
- PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next
- stream sent by the sender (see new stream
- identifier).
-
-
- A header block fragment containing request header
- fields.
-
-
- Padding octets.
-
-
-
-
-
- The PUSH_PROMISE frame defines the following flags:
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any
- other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
-
-
-
-
-
-
- PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
- identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the
- stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
- Promised streams are not required to be used in the order they are promised. The
- PUSH_PROMISE only reserves stream identifiers for later use.
-
-
-
- PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the
- peer endpoint is set to 0. An endpoint that has set this setting and has received
- acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR.
-
-
- Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
- RST_STREAM referencing the promised stream identifier back to the sender of
- the PUSH_PROMISE.
-
-
-
- A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for
- header compression. PUSH_PROMISE also reserves a stream for later use, causing the
- promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a
- stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
- ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST
- be in the "idle" state).
-
-
- Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
- state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a
- stream that is neither "open" nor "half closed (local)" as a connection error of type
- PROTOCOL_ERROR. However, an endpoint that has sent
- RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that
- might have been created before the RST_STREAM frame is received and
- processed.
-
-
- A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a
- stream that is not currently in the "idle" state) as a connection error of type
- PROTOCOL_ERROR.
-
-
-
- The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical
- to those defined for DATA frames.
-
-
-
-
-
- The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
- sender, as well as determining whether an idle connection is still functional. PING
- frames can be sent from any endpoint.
-
-
-
-
-
-
- In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
- A sender can include any value it chooses and use those bytes in any fashion.
-
-
- Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
- the ACK flag set in response, with an identical payload. PING responses SHOULD be given
- higher priority than any other frame.
-
-
-
- The PING frame defines the following flags:
-
-
- Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST
- set this flag in PING responses. An endpoint MUST NOT respond to PING frames
- containing this flag.
-
-
-
-
- PING frames are not associated with any individual stream. If a PING frame is received
- with a stream identifier field value other than 0x0, the recipient MUST respond with a
- connection error of type
- PROTOCOL_ERROR.
-
-
- Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type
- FRAME_SIZE_ERROR.
-
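- A small illustrative sketch of the responder side (not part of this specification): the
- 8-octet opaque payload is echoed back unchanged in a PING frame that carries the ACK
- flag (0x1, bit 1 above) and a stream identifier of 0x0. The function name is an
- assumption of the sketch.
-
-   package sketch
-
-   import "encoding/binary"
-
-   const (
-       frameTypePing = 0x6
-       flagPingAck   = 0x1 // bit 1 above
-   )
-
-   // pingResponse builds the PING frame that answers a received PING.
-   func pingResponse(opaque [8]byte) []byte {
-       frame := make([]byte, 9, 9+8)
-       frame[2] = 8 // 24-bit length: the payload is exactly 8 octets
-       frame[3] = frameTypePing
-       frame[4] = flagPingAck
-       binary.BigEndian.PutUint32(frame[5:9], 0) // PING applies to the connection, stream 0x0
-       return append(frame, opaque[:]...)
-   }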
-
-
-
-
-
- The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
- connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
- will ignore frames sent on any new streams with identifiers higher than the included last
- stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
- connection, although a new connection can be established for new streams.
-
-
- The purpose of this frame is to allow an endpoint to gracefully stop accepting new
- streams, while still finishing processing of previously established streams. This enables
- administrative actions, like server maintenance.
-
-
- There is an inherent race condition between an endpoint starting new streams and the
- remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
- identifier of the last peer-initiated stream which was or might be processed on the
- sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
- the identified stream is the highest numbered stream initiated by the client.
-
-
- If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
- than what is indicated in the GOAWAY frame, those streams are not or will not be
- processed. The receiver of the GOAWAY frame can treat the streams as though they had
- never been created at all, thereby allowing those streams to be retried later on a new
- connection.
-
-
- Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
- can know whether a stream has been partially processed or not. For example, if an HTTP
- client sends a POST at the same time that a server closes a connection, the client cannot
- know if the server started to process that POST request if the server does not send a
- GOAWAY frame to indicate what streams it might have acted on.
-
-
- An endpoint might choose to close a connection without sending GOAWAY for misbehaving
- peers.
-
-
-
-
-
-
- The GOAWAY frame does not define any flags.
-
-
- The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
- a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
- PROTOCOL_ERROR.
-
-
- The last stream identifier in the GOAWAY frame contains the highest numbered stream
- identifier for which the sender of the GOAWAY frame might have taken some action on, or
- might yet take action on. All streams up to and including the identified stream might
- have been processed in some way. The last stream identifier can be set to 0 if no streams
- were processed.
-
-
- In this context, "processed" means that some data from the stream was passed to some
- higher layer of software that might have taken some action as a result.
-
-
- If a connection terminates without a GOAWAY frame, the last stream identifier is
- effectively the highest possible stream identifier.
-
-
- On streams with lower or equal numbered identifiers that were not closed completely prior
- to the connection being closed, re-attempting requests, transactions, or any protocol
- activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
- DELETE. Any protocol activity that uses higher numbered streams can be safely retried
- using a new connection.
-
-
- Activity on streams numbered lower or equal to the last stream identifier might still
- complete successfully. The sender of a GOAWAY frame might gracefully shut down a
- connection by sending a GOAWAY frame, maintaining the connection in an open state until
- all in-progress streams complete.
-
-
- An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
- endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
- subsequently encounter a condition that requires immediate termination of the connection.
- The last stream identifier from the last GOAWAY frame received indicates which streams
- could have been acted upon. Endpoints MUST NOT increase the value they send in the last
- stream identifier, since the peers might already have retried unprocessed requests on
- another connection.
-
-
- A client that is unable to retry requests loses all requests that are in flight when the
- server closes the connection. This is especially true for intermediaries that might
- not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
- a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
- 2^31-1 and a NO_ERROR code. This signals to the client that
- a shutdown is imminent and that no further requests can be initiated. After waiting at
- least one round trip time, the server can send another GOAWAY frame with an updated last
- stream identifier. This ensures that a connection can be cleanly shut down without losing
- requests.
-
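- A sketch of this two-step shutdown (illustrative only; sendGoAway, roundTrip and
- lastProcessedStream are assumptions of the sketch, and NO_ERROR is error code 0x0):
-
-   package sketch
-
-   import "time"
-
-   // gracefulShutdown first announces an imminent shutdown, then advertises
-   // the real last stream identifier after at least one round trip.
-   func gracefulShutdown(sendGoAway func(lastStreamID, errCode uint32) error,
-       roundTrip time.Duration, lastProcessedStream func() uint32) error {
-       // First GOAWAY: last stream identifier 2^31-1 and NO_ERROR, so that no
-       // further requests are initiated while in-flight streams continue.
-       if err := sendGoAway(1<<31-1, 0); err != nil {
-           return err
-       }
-       time.Sleep(roundTrip) // wait at least one round trip time
-       // Second GOAWAY: the highest stream identifier actually acted upon.
-       return sendGoAway(lastProcessedStream(), 0)
-   }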
-
-
- After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
- higher than the identified last stream. However, any frames that alter connection state
- cannot be completely ignored. For instance, HEADERS,
- PUSH_PROMISE and CONTINUATION frames MUST be minimally
- processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
- control window. Failure to process these frames can cause flow control or header
- compression state to become unsynchronized.
-
-
-
- The GOAWAY frame also contains a 32-bit error code that
- contains the reason for closing the connection.
-
-
- Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug
- data is intended for diagnostic purposes only and carries no semantic value. Debug
- information could contain security- or privacy-sensitive data. Logged or otherwise
- persistently stored debug data MUST have adequate safeguards to prevent unauthorized
- access.
-
-
-
-
-
- The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview.
-
-
- Flow control operates at two levels: on each individual stream and on the entire
- connection.
-
-
- Both types of flow control are hop-by-hop; that is, only between the two endpoints.
- Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
- However, throttling of data transfer by any receiver can indirectly cause the propagation
- of flow control information toward the original sender.
-
-
- Flow control only applies to frames that are identified as being subject to flow control.
- Of the frame types defined in this document, this includes only DATA frames.
- Frames that are exempt from flow control MUST be accepted and processed, unless the
- receiver is unable to assign resources to handling the frame. A receiver MAY respond with
- a stream error or connection error of type
- FLOW_CONTROL_ERROR if it is unable to accept a frame.
-
-
-
-
-
- The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
- indicating the number of octets that the sender can transmit in addition to the existing
- flow control window. The legal range for the increment to the flow control window is 1 to
- 2^31-1 (0x7fffffff) octets.
-
-
- The WINDOW_UPDATE frame does not define any flags.
-
-
- The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
- former case, the frame's stream identifier indicates the affected stream; in the latter,
- the value "0" indicates that the entire connection is the subject of the frame.
-
-
- A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
- increment of 0 as a stream error of type
- PROTOCOL_ERROR; errors on the connection flow control window MUST be
- treated as a connection error.
-
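- A minimal parsing sketch (illustrative only; the function name is an assumption of the
- sketch): the reserved bit is discarded and an increment of 0 is rejected as described
- above.
-
-   package sketch
-
-   import (
-       "encoding/binary"
-       "errors"
-   )
-
-   // parseWindowUpdateIncrement reads the WINDOW_UPDATE payload: one reserved
-   // bit followed by an unsigned 31-bit window size increment.
-   func parseWindowUpdateIncrement(payload []byte) (uint32, error) {
-       if len(payload) != 4 {
-           return 0, errors.New("WINDOW_UPDATE payload is exactly 4 octets")
-       }
-       increment := binary.BigEndian.Uint32(payload) & 0x7fffffff // drop the reserved bit
-       if increment == 0 {
-           return 0, errors.New("window increment of 0: PROTOCOL_ERROR")
-       }
-       return increment, nil
-   }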
-
- WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
- This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
- or "closed" stream. A receiver MUST NOT treat this as an error, see .
-
-
- A receiver that receives a flow controlled frame MUST always account for its contribution
- against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
- frame is in error. Since the sender counts the frame toward the flow control window, if
- the receiver does not, the flow control window at sender and receiver can become
- different.
-
-
-
-
- Flow control in HTTP/2 is implemented using a window kept by each sender on every
- stream. The flow control window is a simple integer value that indicates how many octets
- of data the sender is permitted to transmit; as such, its size is a measure of the
- buffering capacity of the receiver.
-
-
- Two flow control windows are applicable: the stream flow control window and the
- connection flow control window. The sender MUST NOT send a flow controlled frame with a
- length that exceeds the space available in either of the flow control windows advertised
- by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
- empty DATA frame) MAY be sent if there is no available space in either
- flow control window.
-
-
- For flow control calculations, the 9 octet frame header is not counted.
-
-
- After sending a flow controlled frame, the sender reduces the space available in both
- windows by the length of the transmitted frame.
-
-
- The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
- space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
- and connection level flow control windows.
-
-
- A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
- amount specified in the frame.
-
-
- A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
- If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
- maximum it MUST terminate either the stream or the connection, as appropriate. For
- streams, the sender sends a RST_STREAM with the error code of
- FLOW_CONTROL_ERROR code; for the connection, a GOAWAY
- frame with a FLOW_CONTROL_ERROR code.
-
-
- Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
- completely asynchronous with respect to each other. This property allows a receiver to
- aggressively update the window size kept by the sender to prevent streams from stalling.
-
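- The sender-side bookkeeping described above can be sketched as follows (illustrative
- only; the streamSender type and its method names are assumptions of the sketch):
-
-   package sketch
-
-   import "errors"
-
-   const maxWindow = 1<<31 - 1
-
-   // streamSender holds the two windows consulted before sending a flow
-   // controlled frame: this stream's window and the shared connection window.
-   type streamSender struct {
-       connWindow   *int64 // connection-level window shared by all streams
-       streamWindow int64  // this stream's window
-   }
-
-   // canSend reports whether a frame of length n fits in both windows.
-   func (s *streamSender) canSend(n int64) bool {
-       return n <= *s.connWindow && n <= s.streamWindow
-   }
-
-   // onDataSent reduces both windows by the length of the transmitted frame.
-   func (s *streamSender) onDataSent(n int64) {
-       *s.connWindow -= n
-       s.streamWindow -= n
-   }
-
-   // onWindowUpdate applies a WINDOW_UPDATE increment to the stream window.
-   // Overflow past 2^31-1 must end the stream (RST_STREAM) or, for the
-   // connection window, the connection (GOAWAY) with FLOW_CONTROL_ERROR.
-   func (s *streamSender) onWindowUpdate(increment uint32) error {
-       if s.streamWindow+int64(increment) > maxWindow {
-           return errors.New("flow control window overflow: FLOW_CONTROL_ERROR")
-       }
-       s.streamWindow += int64(increment)
-       return nil
-   }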
-
-
-
-
- When an HTTP/2 connection is first established, new streams are created with an initial
- flow control window size of 65,535 octets. The connection flow control window is 65,535
- octets. Both endpoints can adjust the initial window size for new streams by including
- a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS
- frame that forms part of the connection preface. The connection flow control window can
- only be changed using WINDOW_UPDATE frames.
-
-
- Prior to receiving a SETTINGS frame that sets a value for
- SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default
- initial window size when sending flow controlled frames. Similarly, the connection flow
- control window is set to the default initial window size until a WINDOW_UPDATE frame is
- received.
-
-
- A SETTINGS frame can alter the initial flow control window size for all
- current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
- a receiver MUST adjust the size of all stream flow control windows that it maintains by
- the difference between the new value and the old value.
-
-
- A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in
- a flow control window to become negative. A sender MUST track the negative flow control
- window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
- frames that cause the flow control window to become positive.
-
-
- For example, if the client sends 60KB immediately on connection establishment, and the
- server sets the initial window size to be 16KB, the client will recalculate the
- available flow control window to be -44KB on receipt of the SETTINGS
- frame. The client retains a negative flow control window until WINDOW_UPDATE frames
- restore the window to being positive, after which the client can resume sending.
-
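- The adjustment and the worked example above can be sketched as follows (illustrative
- only; the function name is an assumption of the sketch):
-
-   package sketch
-
-   // adjustStreamWindows applies the SETTINGS_INITIAL_WINDOW_SIZE rule: every
-   // stream window moves by the difference between the new and old values,
-   // and the result is allowed to become negative.
-   func adjustStreamWindows(windows map[uint32]int64, oldInitial, newInitial int64) {
-       delta := newInitial - oldInitial
-       for id := range windows {
-           windows[id] += delta // a negative window pauses sending on that stream
-       }
-   }
-
- With the numbers in the example above, 65,535 - 61,440 = 4,095 octets remain when the
- SETTINGS frame arrives; adding the delta of 16,384 - 65,535 = -49,151 leaves a window of
- -45,056 octets (-44KB) until WINDOW_UPDATE frames make it positive again.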
-
- A SETTINGS frame cannot alter the connection flow control window.
-
-
- An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that
- causes any flow control window to exceed the maximum size as a connection error of type
- FLOW_CONTROL_ERROR.
-
-
-
-
-
- A receiver that wishes to use a smaller flow control window than the current size can
- send a new SETTINGS frame. However, the receiver MUST be prepared to
- receive data that exceeds this window size, since the sender might send data that
- exceeds the lower limit prior to processing the SETTINGS frame.
-
-
- After sending a SETTINGS frame that reduces the initial flow control window size, a
- receiver has two options for handling streams that exceed flow control limits:
-
-
- The receiver can immediately send RST_STREAM with
- FLOW_CONTROL_ERROR error code for the affected streams.
-
-
- The receiver can accept the streams and tolerate the resulting head of line
- blocking, sending WINDOW_UPDATE frames as it consumes data.
-
-
-
-
-
-
-
-
- The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can
- be sent on an existing stream, as long as the preceding frame is on the same stream and is
- a HEADERS, PUSH_PROMISE or CONTINUATION frame without the
- END_HEADERS flag set.
-
-
-
-
-
-
- The CONTINUATION frame payload contains a header block
- fragment.
-
-
-
- The CONTINUATION frame defines the following flag:
-
-
-
- Bit 3 being set indicates that this frame ends a header
- block.
-
-
- If the END_HEADERS bit is not set, this frame MUST be followed by another
- CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or
- a frame on a different stream as a connection
- error of type PROTOCOL_ERROR.
-
-
-
-
-
-
- The CONTINUATION frame changes the connection state as defined in .
-
-
-
- CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
- whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR.
-
-
-
- A CONTINUATION frame MUST be preceded by a HEADERS,
- PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A
- recipient that observes violation of this rule MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- Error codes are 32-bit fields that are used in RST_STREAM and
- GOAWAY frames to convey the reasons for the stream or connection error.
-
-
-
- Error codes share a common code space. Some error codes apply only to either streams or the
- entire connection and have no defined semantics in the other context.
-
-
-
- The following error codes are defined:
-
-
- The associated condition is not a result of an error. For example, a
- GOAWAY might include this code to indicate graceful shutdown of a
- connection.
-
-
- The endpoint detected an unspecific protocol error. This error is for use when a more
- specific error code is not available.
-
-
- The endpoint encountered an unexpected internal error.
-
-
- The endpoint detected that its peer violated the flow control protocol.
-
-
- The endpoint sent a SETTINGS frame, but did not receive a response in a
- timely manner. See Settings Synchronization.
-
-
- The endpoint received a frame after a stream was half closed.
-
-
- The endpoint received a frame with an invalid size.
-
-
- The endpoint refuses the stream prior to performing any application processing, see
- for details.
-
-
- Used by the endpoint to indicate that the stream is no longer needed.
-
-
- The endpoint is unable to maintain the header compression context for the connection.
-
-
- The connection established in response to a CONNECT
- request was reset or abnormally closed.
-
-
- The endpoint detected that its peer is exhibiting a behavior that might be generating
- excessive load.
-
-
- The underlying transport has properties that do not meet minimum security
- requirements (see ).
-
-
-
-
- Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
- treated by an implementation as being equivalent to INTERNAL_ERROR.
-
-
-
-
-
- HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
- that, from the application perspective, the features of the protocol are largely
- unchanged. To achieve this, all request and response semantics are preserved, although the
- syntax of conveying those semantics has changed.
-
-
- Thus, the specification and requirements of HTTP/1.1 Semantics and Content, Conditional Requests, Range Requests, Caching, and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
- and Routing, such as the HTTP and HTTPS URI schemes, are also
- applicable in HTTP/2, but the expression of those semantics for this protocol is defined
- in the sections below.
-
-
-
-
- A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on
- the same stream as the request.
-
-
- An HTTP message (request or response) consists of:
-
-
- for a response only, zero or more HEADERS frames (each followed by zero
- or more CONTINUATION frames) containing the message headers of
- informational (1xx) HTTP responses (see and ),
- and
-
-
- one HEADERS frame (followed by zero or more CONTINUATION
- frames) containing the message headers (see ), and
-
-
- zero or more DATA frames containing the message payload (see ), and
-
-
- optionally, one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the trailer-part, if present (see ).
-
-
- The last frame in the sequence bears an END_STREAM flag, noting that a
- HEADERS frame bearing the END_STREAM flag can be followed by
- CONTINUATION frames that carry any remaining portions of the header block.
-
-
- Other frames (from any stream) MUST NOT occur between either HEADERS frame
- and any CONTINUATION frames that might follow.
-
-
-
- Trailing header fields are carried in a header block that also terminates the stream.
- That is, a sequence starting with a HEADERS frame, followed by zero or more
- CONTINUATION frames, where the HEADERS frame bears an
- END_STREAM flag. Header blocks after the first that do not terminate the stream are not
- part of an HTTP request or response.
-
-
- A HEADERS frame (and associated CONTINUATION frames) can
- only appear at the start or end of a stream. An endpoint that receives a
- HEADERS frame without the END_STREAM flag set after receiving a final
- (non-informational) status code MUST treat the corresponding request or response as malformed.
-
-
-
- An HTTP request/response exchange fully consumes a single stream. A request starts with
- the HEADERS frame that puts the stream into an "open" state. The request
- ends with a frame bearing END_STREAM, which causes the stream to become "half closed
- (local)" for the client and "half closed (remote)" for the server. A response starts with
- a HEADERS frame and ends with a frame bearing END_STREAM, which places the
- stream in the "closed" state.
-
-
-
-
-
- HTTP/2 removes support for the 101 (Switching Protocols) informational status code
- ().
-
-
- The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
- Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
- their use (see ).
-
-
-
-
-
- HTTP header fields carry information as a series of key-value pairs. For a listing of
- registered HTTP headers, see the Message Header Field Registry maintained at .
-
-
-
-
- While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the
- status code for the response, HTTP/2 uses special pseudo-header fields beginning with
- ':' character (ASCII 0x3a) for this purpose.
-
-
- Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
- pseudo-header fields other than those defined in this document.
-
-
- Pseudo-header fields are only valid in the context in which they are defined.
- Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
- fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
- NOT appear in trailers. Endpoints MUST treat a request or response that contains
- undefined or invalid pseudo-header fields as malformed.
-
-
- Just as in HTTP/1.x, header field names are strings of ASCII characters that are
- compared in a case-insensitive fashion. However, header field names MUST be converted
- to lowercase prior to their encoding in HTTP/2. A request or response containing
- uppercase header field names MUST be treated as malformed.
-
-
- All pseudo-header fields MUST appear in the header block before regular header fields.
- Any request or response that contains a pseudo-header field that appears in a header
- block after a regular header field MUST be treated as malformed.
-
-
-
-
-
- HTTP/2 does not use the Connection header field to
- indicate connection-specific header fields; in this protocol, connection-specific
- metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message
- containing connection-specific header fields; any message containing
- connection-specific header fields MUST be treated as malformed.
-
-
- This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
- to remove any header fields nominated by the Connection header field, along with the
- Connection header field itself. Such intermediaries SHOULD also remove other
- connection-specific header fields, such as Keep-Alive, Proxy-Connection,
- Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
-
-
- One exception to this is the TE header field, which MAY be present in an HTTP/2
- request, but when it is MUST NOT contain any value other than "trailers".
-
-
-
-
- HTTP/2 purposefully does not support upgrade to another protocol. The handshake
- methods described in are believed sufficient to
- negotiate the use of alternative protocols.
-
-
-
-
-
-
-
- The following pseudo-header fields are defined for HTTP/2 requests:
-
-
-
- The :method pseudo-header field includes the HTTP
- method ().
-
-
-
-
- The :scheme pseudo-header field includes the scheme
- portion of the target URI ().
-
-
- :scheme is not restricted to http and https schemed URIs. A
- proxy or gateway can translate requests for non-HTTP schemes, enabling the use
- of HTTP to interact with non-HTTP services.
-
-
-
-
- The :authority pseudo-header field includes the
- authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http
- or https schemed URIs.
-
-
- To ensure that the HTTP/1.1 request line can be reproduced accurately, this
- pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
- that has a request target in origin or asterisk form (see ). Clients that generate
- HTTP/2 requests directly SHOULD use the :authority pseudo-header
- field instead of the Host header field. An
- intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by
- copying the value of the :authority pseudo-header
- field.
-
-
-
-
- The :path pseudo-header field includes the path and
- query parts of the target URI (the path-absolute
- production from and optionally a '?' character
- followed by the query production, see and ). A request in asterisk form includes the value '*' for the
- :path pseudo-header field.
-
-
- This pseudo-header field MUST NOT be empty for http
- or https URIs; http or
- https URIs that do not contain a path component
- MUST include a value of '/'. The exception to this rule is an OPTIONS request
- for an http or https
- URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ).
-
-
-
-
-
- All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory
- pseudo-header fields is malformed.
-
-
- HTTP/2 does not define a way to carry the version identifier that is included in the
- HTTP/1.1 request line.
-
-
-
-
-
- For HTTP/2 responses, a single :status pseudo-header
- field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all
- responses, otherwise the response is malformed.
-
-
- HTTP/2 does not define a way to carry the version or reason phrase that is included in
- an HTTP/1.1 status line.
-
-
-
-
-
- The Cookie header field can carry a significant amount of
- redundant data.
-
-
- The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
- This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from
- being separated into different name-value pairs. This can significantly reduce
- compression efficiency as individual cookie-pairs are updated.
-
-
- To allow for better compression efficiency, the Cookie header field MAY be split into
- separate header fields, each with one or more cookie-pairs. If there are multiple
- Cookie header fields after decompression, these MUST be concatenated into a single
- octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
- before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
- generic HTTP server application.
-
-
-
- Therefore, the following two lists of Cookie header fields are semantically
- equivalent.
-
-
-   cookie: a=b; c=d; e=f
-
-   cookie: a=b
-   cookie: c=d
-   cookie: e=f
-
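- A minimal Go sketch of the split and recombination rules above (illustrative only; the
- function names are assumptions of the sketch):
-
-   package sketch
-
-   import "strings"
-
-   // splitCookie turns one Cookie field value into individual cookie-pairs,
-   // each of which may be sent as its own cookie header field.
-   func splitCookie(value string) []string {
-       return strings.Split(value, "; ")
-   }
-
-   // recombineCookies joins split cookie header fields back into a single
-   // field value using the two-octet delimiter "; " before the message is
-   // passed to a non-HTTP/2 context.
-   func recombineCookies(crumbs []string) string {
-       return strings.Join(crumbs, "; ")
-   }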
-
- A malformed request or response is one that is an otherwise valid sequence of HTTP/2
- frames, but is invalid due to the presence of extraneous frames, prohibited
- header fields, the absence of mandatory header fields, or the inclusion of uppercase
- header field names.
-
-
- A request or response that includes an entity body can include a content-length header field. A request or response is also
- malformed if the value of a content-length header field
- does not equal the sum of the DATA frame payload lengths that form the
- body. A response that is defined to have no payload, as described in , can have a non-zero
- content-length header field, even though no content is
- included in DATA frames.
-
-
- Intermediaries that process HTTP requests or responses (i.e., any intermediary not
- acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
- requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR.
-
-
- For malformed requests, a server MAY send an HTTP response prior to closing or
- resetting the stream. Clients MUST NOT accept a malformed response. Note that these
- requirements are intended to protect against several types of common attacks against
- HTTP; they are deliberately strict, because being permissive can expose
- implementations to these vulnerabilities.
-
-
-
-
-
-
- This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
- HTTP/2 requests and responses.
-
-
- An HTTP GET request includes request header fields and no body and is therefore
- transmitted as a single HEADERS frame, followed by zero or more
- CONTINUATION frames containing the serialized block of request header
- fields. The HEADERS frame in the following has both the END_HEADERS and
- END_STREAM flags set; no CONTINUATION frames are sent:
-
-
-
-   GET /resource HTTP/1.1           HEADERS
-   Host: example.org          ==>     + END_STREAM
-   Accept: image/jpeg                 + END_HEADERS
-                                        :method = GET
-                                        :scheme = https
-                                        :path = /resource
-                                        host = example.org
-                                        accept = image/jpeg
-
-
-
- Similarly, a response that includes only response header fields is transmitted as a
- HEADERS frame (again, followed by zero or more
- CONTINUATION frames) containing the serialized block of response header
- fields.
-
-
-
-   HTTP/1.1 304 Not Modified        HEADERS
-   ETag: "xyzzy"              ==>     + END_STREAM
-   Expires: Thu, 23 Jan ...           + END_HEADERS
-                                        :status = 304
-                                        etag = "xyzzy"
-                                        expires = Thu, 23 Jan ...
-
-
-
- An HTTP POST request that includes request header fields and payload data is transmitted
- as one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the request header fields, followed by one
- or more DATA frames, with the last CONTINUATION (or
- HEADERS) frame having the END_HEADERS flag set and the final
- DATA frame having the END_STREAM flag set:
-
-
-
-   POST /resource HTTP/1.1          HEADERS
-   Host: example.org          ==>     - END_STREAM
-   Content-Type: image/jpeg           - END_HEADERS
-   Content-Length: 123                  :method = POST
-                                        :path = /resource
-   {binary data}                        :scheme = https
-
-                                    CONTINUATION
-                                      + END_HEADERS
-                                        content-type = image/jpeg
-                                        host = example.org
-                                        content-length = 123
-
-                                    DATA
-                                      + END_STREAM
-                                    {binary data}
-
- Note that data contributing to any given header field could be spread between header
- block fragments. The allocation of header fields to frames in this example is
- illustrative only.
-
-
-
-
- A response that includes header fields and payload data is transmitted as a
- HEADERS frame, followed by zero or more CONTINUATION
- frames, followed by one or more DATA frames, with the last
- DATA frame in the sequence having the END_STREAM flag set:
-
-
-
-   HTTP/1.1 200 OK                  HEADERS
-   Content-Type: image/jpeg   ==>     - END_STREAM
-   Content-Length: 123                + END_HEADERS
-                                        :status = 200
-   {binary data}                        content-type = image/jpeg
-                                        content-length = 123
-
-                                    DATA
-                                      + END_STREAM
-                                    {binary data}
-
-
-
- Trailing header fields are sent as a header block after both the request or response
- header block and all the DATA frames have been sent. The
- HEADERS frame starting the trailers header block has the END_STREAM flag
- set.
-
-
-
-   HTTP/1.1 200 OK                  HEADERS
-   Content-Type: image/jpeg   ==>     - END_STREAM
-   Transfer-Encoding: chunked         + END_HEADERS
-   Trailer: Foo                         :status = 200
-                                        content-length = 123
-   123                                  content-type = image/jpeg
-   {binary data}                        trailer = Foo
-   0
-   Foo: bar                         DATA
-                                      - END_STREAM
-                                    {binary data}
-
-                                    HEADERS
-                                      + END_STREAM
-                                      + END_HEADERS
-                                        foo = bar
-
-
-
-
-
- An informational response using a 1xx status code other than 101 is transmitted as a
- HEADERS frame, followed by zero or more CONTINUATION
- frames:
-
-   HTTP/1.1 103 BAR                 HEADERS
-   Extension-Field: bar       ==>     - END_STREAM
-                                      + END_HEADERS
-                                        :status = 103
-                                        extension-field = bar
-
-
-
-
-
- In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
- occurs, because there is no means to determine the nature of the error. It is possible
- that some server processing occurred prior to the error, which could result in
- undesirable effects if the request were reattempted.
-
-
- HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
- not been processed:
-
-
- The GOAWAY frame indicates the highest stream number that might have
- been processed. Requests on streams with higher numbers are therefore guaranteed to
- be safe to retry.
-
-
- The REFUSED_STREAM error code can be included in a
- RST_STREAM frame to indicate that the stream is being closed prior to
- any processing having occurred. Any request that was sent on the reset stream can
- be safely retried.
-
-
-
-
- Requests that have not been processed have not failed; clients MAY automatically retry
- them, even those with non-idempotent methods.
-
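- These two guarantees can be combined into one illustrative check (the parameter names
- are assumptions of the sketch):
-
-   package sketch
-
-   // safeToRetry reports whether a request is known to be unprocessed: its
-   // stream identifier is higher than the last stream identifier carried in
-   // GOAWAY, or its stream was reset with REFUSED_STREAM.
-   func safeToRetry(streamID, goAwayLastStreamID uint32, refusedStream bool) bool {
-       return streamID > goAwayLastStreamID || refusedStream
-   }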
-
- A server MUST NOT indicate that a stream has not been processed unless it can guarantee
- that fact. If frames that are on a stream are passed to the application layer for any
- stream, then REFUSED_STREAM MUST NOT be used for that stream, and a
- GOAWAY frame MUST include a stream identifier that is greater than or
- equal to the given stream identifier.
-
-
- In addition to these mechanisms, the PING frame provides a way for a
- client to easily test a connection. Connections that remain idle can become broken as
- some middleboxes (for instance, network address translators, or load balancers) silently
- discard connection bindings. The PING frame allows a client to safely
- test whether a connection is still active without sending a request.
-
-
-
-
-
-
- HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
- corresponding "promised" requests) to a client in association with a previous
- client-initiated request. This can be useful when the server knows the client will need
- to have those responses available in order to fully process the response to the original
- request.
-
-
-
- Pushing additional message exchanges in this fashion is optional, and is negotiated
- between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set
- to 0 to indicate that server push is disabled.
-
-
- Promised requests MUST be cacheable (see ), MUST be safe (see ), and MUST NOT include a request body. Clients that receive a
- promised request that is not cacheable, that is unsafe, or that includes a request body MUST
- reset the stream with a stream error of type
- PROTOCOL_ERROR.
-
-
- Pushed responses that are cacheable (see ) can be stored by the client, if it implements an HTTP
- cache. Pushed responses are considered successfully validated on the origin server (e.g.,
- if the "no-cache" cache response directive is present) while the stream identified by the
- promised stream ID is still open.
-
-
- Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
- be made available to the application separately.
-
-
- An intermediary can receive pushes from the server and choose not to forward them on to
- the client. In other words, how to make use of the pushed information is up to that
- intermediary. Equally, the intermediary might choose to make additional pushes to the
- client, without any action taken by the server.
-
-
- A client cannot push. Thus, servers MUST treat the receipt of a
- PUSH_PROMISE frame as a connection
- error of type PROTOCOL_ERROR. Clients MUST reject any attempt to
- change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating
- the message as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Server push is semantically equivalent to a server responding to a request; however, in
- this case that request is also sent by the server, as a PUSH_PROMISE
- frame.
-
-
- The PUSH_PROMISE frame includes a header block that contains a complete
- set of request header fields that the server attributes to the request. It is not
- possible to push a response to a request that includes a request body.
-
-
-
- Pushed responses are always associated with an explicit request from the client. The
- PUSH_PROMISE frames sent by the server are sent on that explicit
- request's stream. The PUSH_PROMISE frame also includes a promised stream
- identifier, chosen from the stream identifiers available to the server (see ).
-
-
-
- The header fields in PUSH_PROMISE and any subsequent
- CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in
- the :method header field that is safe and cacheable. If a
- client receives a PUSH_PROMISE that does not include a complete and valid
- set of header fields, or the :method header field identifies
- a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR.
-
-
-
- The server SHOULD send PUSH_PROMISE ()
- frames prior to sending any frames that reference the promised responses. This avoids a
- race where clients issue requests prior to receiving any PUSH_PROMISE
- frames.
-
-
- For example, if the server receives a request for a document containing embedded links
- to multiple image files, and the server chooses to push those additional images to the
- client, sending push promises before the DATA frames that contain the
- image links ensures that the client is able to see the promises before discovering
- embedded links. Similarly, if the server pushes responses referenced by the header block
- (for instance, in Link header fields), sending the push promises before sending the
- header block ensures that clients do not request them.
-
-
-
- PUSH_PROMISE frames MUST NOT be sent by the client.
-
-
- PUSH_PROMISE frames can be sent by the server in response to any
- client-initiated stream, but the stream MUST be in either the "open" or "half closed
- (remote)" state with respect to the server. PUSH_PROMISE frames are
- interspersed with the frames that comprise a response, though they cannot be
- interspersed with HEADERS and CONTINUATION frames that
- comprise a single header block.
-
-
- Sending a PUSH_PROMISE frame creates a new stream and puts the stream
- into the "reserved (local)" state for the server and the "reserved (remote)" state for
- the client.
-
-
-
-
-
- After sending the PUSH_PROMISE frame, the server can begin delivering the
- pushed response as a response on a server-initiated
- stream that uses the promised stream identifier. The server uses this stream to
- transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed"
- to the client after the initial HEADERS frame is sent.
-
-
-
- Once a client receives a PUSH_PROMISE frame and chooses to accept the
- pushed response, the client SHOULD NOT issue any requests for the promised response
- until after the promised stream has closed.
-
-
-
- If the client determines, for any reason, that it does not wish to receive the pushed
- response from the server, or if the server takes too long to begin sending the promised
- response, the client can send an RST_STREAM frame, using either the
- CANCEL or REFUSED_STREAM codes, and referencing the pushed
- stream's identifier.
-
-
- A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the
- number of responses that can be concurrently pushed by a server. Advertising a
- SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by
- preventing the server from creating the necessary streams. This does not prohibit a
- server from sending PUSH_PROMISE frames; clients need to reset any
- promised streams that are not wanted.
-
-
-
- Clients receiving a pushed response MUST validate that either the server is
- authoritative (see ), or the proxy that provided the pushed
- response is configured for the corresponding request. For example, a server that offers
- a certificate for only the example.com DNS-ID or Common Name
- is not permitted to push a response for https://www.example.org/doc.
-
-
- The response for a PUSH_PROMISE stream begins with a
- HEADERS frame, which immediately puts the stream into the "half closed
- (remote)" state for the server and "half closed (local)" state for the client, and ends
- with a frame bearing END_STREAM, which places the stream in the "closed" state.
-
-
- The client never sends a frame with the END_STREAM flag for a server push.
-
-
-
-
-
-
-
-
-
- In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host.
- CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
- server for the purposes of interacting with https resources.
-
-
- In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
- a remote host, for similar purposes. The HTTP header field mapping works as defined in
- Request Header Fields, with a few
- differences. Specifically:
-
-
- The :method header field is set to CONNECT.
-
-
- The :scheme and :path header
- fields MUST be omitted.
-
-
- The :authority header field contains the host and port to
- connect to (equivalent to the authority-form of the request-target of CONNECT
- requests, see ).
-
-
-
-
- A proxy that supports CONNECT establishes a TCP connection to
- the server identified in the :authority header field. Once
- this connection is successfully established, the proxy sends a HEADERS
- frame containing a 2xx series status code to the client, as defined in .
-
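As an illustrative sketch of the reduced header block described above (not taken from the specification), the pseudo-header fields of a CONNECT request can be encoded with an HPACK encoder; the golang.org/x/net/http2/hpack import path and the example.com:443 target are assumptions made for the example.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// CONNECT omits :scheme and :path; :authority carries the host and port.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "CONNECT"})
	enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com:443"})
	fmt.Printf("header block fragment: %x\n", buf.Bytes())
}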
-
- After the initial HEADERS frame sent by each peer, all subsequent
- DATA frames correspond to data sent on the TCP connection. The payload of
- any DATA frames sent by the client is transmitted by the proxy to the TCP
- server; data received from the TCP server is assembled into DATA frames by
- the proxy. Frame types other than DATA or stream management frames
- (RST_STREAM, WINDOW_UPDATE, and PRIORITY)
- MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received.
-
-
- The TCP connection can be closed by either peer. The END_STREAM flag on a
- DATA frame is treated as being equivalent to the TCP FIN bit. A client is
- expected to send a DATA frame with the END_STREAM flag set after receiving
- a frame bearing the END_STREAM flag. A proxy that receives a DATA frame
- with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
- segment. A proxy that receives a TCP segment with the FIN bit set sends a
- DATA frame with the END_STREAM flag set. Note that the final TCP segment
- or DATA frame could be empty.
-
-
- A TCP connection error is signaled with RST_STREAM. A proxy treats any
- error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
- as a stream error of type
- CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the
- RST bit set if it detects an error with the stream or the HTTP/2 connection.
-
-
-
-
-
-
- This section outlines attributes of the HTTP protocol that improve interoperability, reduce
- exposure to known security vulnerabilities, or reduce the potential for implementation
- variation.
-
-
-
-
- HTTP/2 connections are persistent. For best performance, it is expected that clients will not
- close connections until it is determined that no further communication with a server is
- necessary (for example, when a user navigates away from a particular web page), or until
- the server closes the connection.
-
-
- Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
- where host is derived from a URI, a selected alternative
- service, or a configured proxy.
-
-
- A client can create additional connections as replacements, either to replace connections
- that are near to exhausting the available stream
- identifier space, to refresh the keying material for a TLS connection, or to
- replace connections that have encountered errors.
-
-
- A client MAY open multiple connections to the same IP address and TCP port using different
- Server Name Indication values or to provide different TLS
- client certificates, but SHOULD avoid creating multiple connections with the same
- configuration.
-
-
- Servers are encouraged to maintain open connections for as long as possible, but are
- permitted to terminate idle connections if necessary. When either endpoint chooses to
- close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
- GOAWAY () frame so that both endpoints can reliably
- determine whether previously sent frames have been processed and gracefully complete or
- terminate any necessary remaining tasks.
-
-
-
-
- Connections that are made to an origin server, either directly or through a tunnel
- created using the CONNECT method, MAY be reused for
- requests with multiple different URI authority components. A connection can be reused
- as long as the origin server is authoritative. For
- http resources, this depends on the host having resolved to
- the same IP address.
-
-
- For https resources, connection reuse additionally depends
- on having a certificate that is valid for the host in the URI. An origin server might
- offer a certificate with multiple subjectAltName attributes,
- or names with wildcards, one of which is valid for the authority in the URI. For
- example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for
- requests to URIs starting with https://a.example.com/ and
- https://b.example.com/.
-
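A minimal sketch of the certificate check described above, assuming an already established *tls.Conn; the package and function names are invented, and real clients layer additional policy (and, for "http" URIs, address resolution checks) on top of this.

package connreuse

import "crypto/tls"

// certCoversHost reports whether the peer certificate presented on an
// existing connection is also valid for another authority, a prerequisite
// for reusing that connection. VerifyHostname handles wildcard names such
// as *.example.com.
func certCoversHost(conn *tls.Conn, host string) bool {
	state := conn.ConnectionState()
	if len(state.PeerCertificates) == 0 {
		return false
	}
	return state.PeerCertificates[0].VerifyHostname(host) == nil
}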
-
- In some deployments, reusing a connection for multiple origins can result in requests
- being directed to the wrong origin server. For example, TLS termination might be
- performed by a middlebox that uses the TLS Server Name Indication
- (SNI) extension to select an origin server. This means that it is possible
- for clients to send confidential information to servers that might not be the intended
- target for the request, even though the server is otherwise authoritative.
-
-
- A server that does not wish clients to reuse connections can indicate that it is not
- authoritative for a request by sending a 421 (Misdirected Request) status code in response
- to the request (see ).
-
-
- A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
- through a single connection. That is, all requests sent via a proxy reuse the
- connection to the proxy.
-
-
-
-
-
- The 421 (Misdirected Request) status code indicates that the request was directed at a
- server that is not able to produce a response. This can be sent by a server that is not
- configured to produce responses for the combination of scheme and authority that are
- included in the request URI.
-
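A hedged sketch of the server-side behaviour using Go's net/http; the servedHosts set stands in for whatever configuration actually determines which authorities the server is willing to answer for.

package origin

import "net/http"

// servedHosts is a placeholder for the server's real authority configuration.
var servedHosts = map[string]bool{"example.com": true}

func handler(w http.ResponseWriter, r *http.Request) {
	// For HTTP/2 requests, r.Host carries the :authority pseudo-header
	// (possibly including a port).
	if !servedHosts[r.Host] {
		http.Error(w, "Misdirected Request", 421)
		return
	}
	w.Write([]byte("ok"))
}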
-
- Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
- request - whether the request method is idempotent or not - over a different connection.
- This is possible if a connection is reused () or if an alternative
- service is selected ().
-
-
- This status code MUST NOT be generated by proxies.
-
-
- A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
- definition or explicit cache controls (see ).
-
-
-
-
-
-
- Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over
- TLS. The general TLS usage guidance in SHOULD be followed, with
- some additional restrictions that are specific to HTTP/2.
-
-
-
- An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
- feature set and cipher suite described in this section. Due to implementation
- limitations, it might not be possible to fail TLS negotiation. An endpoint MUST
- immediately terminate an HTTP/2 connection that does not meet these minimum requirements
- with a connection error of type
- INADEQUATE_SECURITY.
-
-
-
-
- The TLS implementation MUST support the Server Name Indication
- (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when
- negotiating TLS.
-
-
- The TLS implementation MUST disable compression. TLS compression can lead to the
- exposure of information that would not otherwise be revealed .
- Generic compression is unnecessary since HTTP/2 provides compression features that are
- more aware of context and therefore likely to be more appropriate for use for
- performance, security or other reasons.
-
-
- The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS
- renegotiation as a connection error of type
- PROTOCOL_ERROR. Note that disabling renegotiation can result in
- long-lived connections becoming unusable due to limits on the number of messages the
- underlying cipher suite can encipher.
-
-
- A client MAY use renegotiation to provide confidentiality protection for client
- credentials offered in the handshake, but any renegotiation MUST occur prior to sending
- the connection preface. A server SHOULD request a client certificate if it sees a
- renegotiation request immediately after establishing a connection.
-
-
- This effectively prevents the use of renegotiation in response to a request for a
- specific protected resource. A future specification might provide a way to support this
- use case.
-
-
-
-
-
- The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
- only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST
- have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
- Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher
- suites that use stream or block ciphers. Authenticated Encryption with Additional Data
- (AEAD) modes, such as the Galois/Counter Mode (GCM) for
- AES, are acceptable.
-
-
- The effect of these restrictions is that TLS 1.2 implementations could have
- non-intersecting sets of available cipher suites, since these prevent the use of the
- cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
- HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 .
-
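For illustration, a Go crypto/tls configuration that stays within these restrictions might look like the following; Go's TLS stack does not implement TLS-level compression and does not accept server-side renegotiation, so those requirements hold without extra configuration. The package and variable names are arbitrary.

package h2tls

import "crypto/tls"

// Config restricts negotiation to TLS 1.2 or higher, the mandatory AEAD
// suite TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with the P-256 curve, and
// offers "h2" via ALPN.
var Config = &tls.Config{
	MinVersion:       tls.VersionTLS12,
	CipherSuites:     []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
	CurvePreferences: []tls.CurveID{tls.CurveP256},
	NextProtos:       []string{"h2"},
}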
-
- Clients MAY advertise support of cipher suites that are prohibited by the above
- restrictions in order to allow for connection to servers that do not support HTTP/2.
- This enables a fallback to protocols without these constraints without the additional
- latency imposed by using a separate connection for fallback.
-
-
-
-
-
-
-
-
- HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
- authoritative in providing a given response, see . This relies on local name resolution for the "http"
- URI scheme, and the authenticated server identity for the "https" scheme (see ).
-
-
-
-
-
- In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
- protocol toward a server that understands a different protocol. An attacker might be able
- to cause the transaction to appear as a valid transaction in the second protocol. In
- combination with the capabilities of the web context, this can be used to interact with
- poorly protected servers in private networks.
-
-
- Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
- protection against cross protocol attacks. ALPN provides a positive indication that a
- server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
- protocols.
-
-
- The encryption in TLS makes it difficult for attackers to control the data which could be
- used in a cross-protocol attack on a cleartext protocol.
-
-
- The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
- The connection preface contains a string that is
- designed to confuse HTTP/1.1 servers, but no special protection is offered for other
- protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
- Upgrade header field in addition to the client connection preface could be exposed to a
- cross-protocol attack.
-
-
-
-
-
- HTTP/2 header field names and values are encoded as sequences of octets with a length
- prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
- header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
- directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might
- exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
- header fields, extra header fields, or even new messages that are entirely falsified.
-
-
- Header field names or values that contain characters not permitted by HTTP/1.1, including
- carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an
- intermediary, as stipulated in .
-
-
- Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity for an attacker.
- Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values.
-
-
-
-
-
- Pushed responses do not have an explicit request from the client; the request
- is provided by the server in the PUSH_PROMISE frame.
-
-
- Caching responses that are pushed is possible based on the guidance provided by the origin
- server in the Cache-Control header field. However, this can cause issues if a single
- server hosts more than one tenant. For example, a server might offer multiple users each
- a small portion of its URI space.
-
-
- Where multiple tenants share space on the same server, that server MUST ensure that
- tenants are not able to push representations of resources that they do not have authority
- over. Failure to enforce this would allow a tenant to provide a representation that would
- be served out of cache, overriding the actual representation that the authoritative tenant
- provides.
-
-
- Pushed responses for which an origin server is not authoritative (see
- ) are never cached or used.
-
-
-
-
-
- An HTTP/2 connection can demand a greater commitment of resources to operate than an
- HTTP/1.1 connection. The use of header compression and flow control depends on a
- commitment of resources for storing a greater amount of state. Settings for these
- features ensure that memory commitments for these features are strictly bounded.
-
-
- The number of PUSH_PROMISE frames is not constrained in the same fashion.
- A client that accepts server push SHOULD limit the number of streams it allows to be in
- the "reserved (remote)" state. An excessive number of server push streams can be treated as
- a stream error of type
- ENHANCE_YOUR_CALM.
-
-
- Processing capacity cannot be guarded as effectively as state capacity.
-
-
- The SETTINGS frame can be abused to cause a peer to expend additional
- processing time. This might be done by pointlessly changing SETTINGS parameters, setting
- multiple undefined parameters, or changing the same setting multiple times in the same
- frame. WINDOW_UPDATE or PRIORITY frames can be abused to
- cause an unnecessary waste of resources.
-
-
- Large numbers of small or empty frames can be abused to cause a peer to expend time
- processing frame headers. Note however that some uses are entirely legitimate, such as
- the sending of an empty DATA frame to end a stream.
-
-
- Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses.
-
-
- Limits in SETTINGS parameters cannot be reduced instantaneously, which
- leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
- particular, immediately after establishing a connection, limits set by a server are not
- known to clients and could be exceeded without being an obvious protocol violation.
-
-
- All these features - i.e., SETTINGS changes, small frames, header
- compression - have legitimate uses. These features become a burden only when they are
- used unnecessarily or to excess.
-
-
- An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of
- service attack. Implementations SHOULD track the use of these features and set limits on
- their use. An endpoint MAY treat activity that is suspicious as a connection error of type
- ENHANCE_YOUR_CALM.
-
-
-
-
- A large header block can cause an implementation to
- commit a large amount of state. Header fields that are critical for routing can appear
- toward the end of a header block, which prevents streaming of header fields to their
- ultimate destination. For this and other reasons, such as ensuring cache correctness,
- an endpoint might need to buffer the entire header block. Since there is no
- hard limit to the size of a header block, some endpoints could be forced to commit a large
- amount of available memory for header fields.
-
-
- An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of
- limits that might apply on the size of header blocks. This setting is only advisory, so
- endpoints MAY choose to send header blocks that exceed this limit and risk having the
- request or response treated as malformed. This setting is specific to a connection,
- so any request or response could encounter a hop with a lower, unknown limit. An
- intermediary can attempt to avoid this problem by passing on values presented by
- different peers, but they are not obligated to do so.
-
-
- A server that receives a larger header block than it is willing to handle can send an
- HTTP 431 (Request Header Fields Too Large) status code . A
- client can discard responses that it cannot process. The header block MUST be processed
- to ensure a consistent connection state, unless the connection is closed.
-
-
-
-
-
-
- HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover
- secret data when it is compressed in the same context as data under attacker control.
-
-
- There are demonstrable attacks on compression that exploit the characteristics of the web
- (e.g., ). The attacker induces multiple requests containing
- varying plaintext, observing the length of the resulting ciphertext in each, which
- reveals a shorter length when a guess about the secret is correct.
-
-
- Implementations communicating on a secure channel MUST NOT compress content that includes
- both confidential and attacker-controlled data unless separate compression dictionaries
- are used for each source of data. Compression MUST NOT be used if the source of data
- cannot be reliably determined. Generic stream compression, such as that provided by TLS,
- MUST NOT be used with HTTP/2 ().
-
-
- Further considerations regarding the compression of header fields are described in .
-
-
-
-
-
- Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
- as might be provided by TLS. Redundant padding could even be
- counterproductive. Correct application can depend on having specific knowledge of the
- data that is being padded.
-
-
- To mitigate attacks that rely on compression, disabling or limiting compression might be
- preferable to padding as a countermeasure.
-
-
- Padding can be used to obscure the exact size of frame content, and is provided to
- mitigate specific attacks within HTTP. For example, attacks where compressed content
- includes both attacker-controlled plaintext and secret data (see for example, ).
-
-
- Use of padding can result in less protection than might seem immediately obvious. At
- best, padding only makes it more difficult for an attacker to infer length information by
- increasing the number of frames an attacker has to observe. Incorrectly implemented
- padding schemes can be easily defeated. In particular, randomized padding with a
- predictable distribution provides very little protection; similarly, padding payloads to a
- fixed size exposes information as payload sizes cross the fixed size boundary, which could
- be possible if an attacker can control plaintext.
-
-
- Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding
- for HEADERS and PUSH_PROMISE frames. A valid reason for an
- intermediary to change the amount of padding of frames is to improve the protections that
- padding provides.
-
-
-
-
-
- Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
- of a single client or server over time. This includes the value of settings, the manner
- in which flow control windows are managed, the way priorities are allocated to streams,
- timing of reactions to stimulus, and handling of any optional features.
-
-
- To the extent that this creates observable differences in behavior, they could be used as a basis
- for fingerprinting a specific client, as defined in .
-
-
-
-
-
-
- A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
- (ALPN) Protocol IDs" registry established in .
-
-
- This document establishes a registry for frame types, settings, and error codes. These new
- registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
-
-
- This document registers the HTTP2-Settings header field for
- use in HTTP; and the 421 (Misdirected Request) status code.
-
-
- This document registers the PRI method for use in HTTP, to avoid
- collisions with the connection preface.
-
-
-
-
- This document creates two registrations for the identification of HTTP/2 in the
- "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in .
-
-
- The "h2" string identifies HTTP/2 when used over TLS:
-
-   Protocol: HTTP/2 over TLS
-   Identification Sequence: 0x68 0x32 ("h2")
-   Reference: This document
-
-
-
- The "h2c" string identifies HTTP/2 when used over cleartext TCP:
-
-   Protocol: HTTP/2 over TCP
-   Identification Sequence: 0x68 0x32 0x63 ("h2c")
-   Reference: This document
-
-
-
-
-
-
- This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
- Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
- either of the "IETF Review" or "IESG Approval" policies for
- values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
- experimental use.
-
-
- New entries in this registry require the following information:
-
-
- A name or label for the frame type.
-
-
- The 8-bit code assigned to the frame type.
-
-
- A reference to a specification that includes a description of the frame layout,
- its semantics, and flags that the frame type uses, including any parts of the frame
- that are conditionally present based on the value of flags.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
- Frame Type        Code
- DATA              0x0
- HEADERS           0x1
- PRIORITY          0x2
- RST_STREAM        0x3
- SETTINGS          0x4
- PUSH_PROMISE      0x5
- PING              0x6
- GOAWAY            0x7
- WINDOW_UPDATE     0x8
- CONTINUATION      0x9
-
-
-
-
-
- This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
- manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to
- 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
-
-
- New registrations are advised to provide the following information:
-
-
- A symbolic name for the setting. Specifying a setting name is optional.
-
-
- The 16-bit code assigned to the setting.
-
-
- An initial value for the setting.
-
-
- An optional reference to a specification that describes the use of the setting.
-
-
-
-
- An initial set of setting registrations can be found in .
-
-
- Name                      Code   Initial Value
- HEADER_TABLE_SIZE         0x1    4096
- ENABLE_PUSH               0x2    1
- MAX_CONCURRENT_STREAMS    0x3    (infinite)
- INITIAL_WINDOW_SIZE       0x4    65535
- MAX_FRAME_SIZE            0x5    16384
- MAX_HEADER_LIST_SIZE      0x6    (infinite)
-
-
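Purely as an illustration of the table above, the registered codes and initial values can be written out as Go constants; the shortened names are not defined anywhere in the protocol and are chosen only for this sketch.

package h2settings

const (
	SettingHeaderTableSize      = 0x1 // initial value: 4096
	SettingEnablePush           = 0x2 // initial value: 1
	SettingMaxConcurrentStreams = 0x3 // initially unlimited
	SettingInitialWindowSize    = 0x4 // initial value: 65535
	SettingMaxFrameSize         = 0x5 // initial value: 16384
	SettingMaxHeaderListSize    = 0x6 // initially unlimited
)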
-
-
-
-
- This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
- registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
- "Expert Review" policy.
-
-
- Registrations for error codes are required to include a description of the error code. An
- expert reviewer is advised to examine new registrations for possible duplication with
- existing error codes. Use of existing registrations is to be encouraged, but not
- mandated.
-
-
- New registrations are advised to provide the following information:
-
-
- A name for the error code. Specifying an error code name is optional.
-
-
- The 32-bit error code value.
-
-
- A brief description of the error code semantics, longer if no detailed specification
- is provided.
-
-
- An optional reference for a specification that defines the error code.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
- Name                    Code   Description
- NO_ERROR                0x0    Graceful shutdown
- PROTOCOL_ERROR          0x1    Protocol error detected
- INTERNAL_ERROR          0x2    Implementation fault
- FLOW_CONTROL_ERROR      0x3    Flow control limits exceeded
- SETTINGS_TIMEOUT        0x4    Settings not acknowledged
- STREAM_CLOSED           0x5    Frame received for closed stream
- FRAME_SIZE_ERROR        0x6    Frame size incorrect
- REFUSED_STREAM          0x7    Stream not processed
- CANCEL                  0x8    Stream cancelled
- COMPRESSION_ERROR       0x9    Compression state not updated
- CONNECT_ERROR           0xa    TCP connection error for CONNECT method
- ENHANCE_YOUR_CALM       0xb    Processing capacity exceeded
- INADEQUATE_SECURITY     0xc    Negotiated TLS parameters not acceptable
-
-
-
-
-
-
-
- This section registers the HTTP2-Settings header field in the
- Permanent Message Header Field Registry.
-
-
- Header field name: HTTP2-Settings
- Applicable protocol: http
- Status: standard
- Author/Change controller: IETF
- Specification document(s): of this document
- Related information: This header field is only used by an HTTP/2 client for Upgrade-based negotiation.
-
-
-
-
-
-
-
- This section registers the PRI method in the HTTP Method
- Registry ().
-
-
- Method Name: PRI
- Safe: No
- Idempotent: No
- Specification document(s): of this document
-
-
- This method is never used by an actual client. This method will appear to be used
- when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection
- preface.
-
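To make the collision concrete, the HTTP/2 client connection preface begins with octets that an HTTP/1.1 parser reads as a request line using the PRI method, which is why the method is reserved; the short Go program below only prints those fixed octets.

package main

import "fmt"

// clientPreface is the fixed sequence every HTTP/2 client sends first.
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

func main() {
	fmt.Printf("%q\n", clientPreface)
}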
-
-
-
-
-
-
- This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
- Transfer Protocol (HTTP) Status Code Registry ().
-
-
-
-
- Value: 421
- Description: Misdirected Request
- Reference: of this document
-
-
-
-
-
-
-
-
-
- This document includes substantial input from the following individuals:
-
-
- Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
- Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
- Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
- contributors).
-
-
- Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
-
-
- William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
- Peon, Rob Trace (Flow control).
-
-
- Mike Bishop (Extensibility).
-
-
- Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
- (Substantial editorial contributions).
-
-
- Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
-
-
- Alexey Melnikov was an editor of this document during 2013.
-
-
- A substantial proportion of Martin's contribution was supported by Microsoft during his
- employment there.
-
-
-
-
-
-
-
-
-
-
- HPACK - Header Compression for HTTP/2
-
-
-
-
-
-
-
-
-
-
-
- Transmission Control Protocol
-
-
- University of Southern California (USC)/Information Sciences
- Institute
-
-
-
-
-
-
-
-
-
-
- Key words for use in RFCs to Indicate Requirement Levels
-
-
- Harvard University
- sob@harvard.edu
-
-
-
-
-
-
-
-
-
-
- HTTP Over TLS
-
-
-
-
-
-
-
-
-
- Uniform Resource Identifier (URI): Generic
- Syntax
-
-
-
-
-
-
-
-
-
-
-
- The Base16, Base32, and Base64 Data Encodings
-
-
-
-
-
-
-
-
- Guidelines for Writing an IANA Considerations Section in RFCs
-
-
-
-
-
-
-
-
-
-
- Augmented BNF for Syntax Specifications: ABNF
-
-
-
-
-
-
-
-
-
-
- The Transport Layer Security (TLS) Protocol Version 1.2
-
-
-
-
-
-
-
-
-
-
- Transport Layer Security (TLS) Extensions: Extension Definitions
-
-
-
-
-
-
-
-
-
- Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension
-
-
-
-
-
-
-
-
-
-
-
-
- TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
- Counter Mode (GCM)
-
-
-
-
-
-
-
-
-
-
- Digital Signature Standard (DSS)
-
- NIST
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Range Requests
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- World Wide Web Consortium
- ylafon@w3.org
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Caching
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- Akamai
- mnot@mnot.net
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Authentication
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
-
- HTTP State Management Mechanism
-
-
-
-
-
-
-
-
-
-
-
- TCP Extensions for High Performance
-
-
-
-
-
-
-
-
-
-
-
- Transport Layer Security Protocol Compression Methods
-
-
-
-
-
-
-
-
- Additional HTTP Status Codes
-
-
-
-
-
-
-
-
-
-
- Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- AES Galois Counter Mode (GCM) Cipher Suites for TLS
-
-
-
-
-
-
-
-
-
-
-
- HTML5
-
-
-
-
-
-
-
-
-
-
- Latest version available at
- .
-
-
-
-
-
-
- Talking to Yourself for Fun and Profit
-
-
-
-
-
-
-
-
-
-
-
-
-
- BREACH: Reviving the CRIME Attack
-
-
-
-
-
-
-
-
-
-
- Registration Procedures for Message Header Fields
-
- Nine by Nine
- GK-IETF@ninebynine.org
-
-
- BEA Systems
- mnot@pobox.com
-
-
- HP Labs
- JeffMogul@acm.org
-
-
-
-
-
-
-
-
-
- Recommendations for Secure Use of TLS and DTLS
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- HTTP Alternative Services
-
-
- Akamai
-
-
- Mozilla
-
-
- greenbytes
-
-
-
-
-
-
-
-
-
-
- This section is to be removed by RFC Editor before publication.
-
-
-
-
- Renamed Not Authoritative status code to Misdirected Request.
-
-
-
-
-
- Pseudo-header fields are now required to appear strictly before regular ones.
-
-
- Restored 1xx series status codes, except 101.
-
-
- Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a setting
- to limit the damage.
-
-
- Added a setting to advise peers of header set size limits.
-
-
- Removed segments.
-
-
- Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping.
-
-
-
-
-
- Restored extensibility options.
-
-
- Restricting TLS cipher suites to AEAD only.
-
-
- Removing Content-Encoding requirements.
-
-
- Permitting the use of PRIORITY after stream close.
-
-
- Removed ALTSVC frame.
-
-
- Removed BLOCKED frame.
-
-
- Reducing the maximum padding size to 256 octets; removing padding from
- CONTINUATION frames.
-
-
- Removed per-frame GZIP compression.
-
-
-
-
-
- Added BLOCKED frame (at risk).
-
-
- Simplified priority scheme.
-
-
- Added DATA per-frame GZIP compression.
-
-
-
-
-
- Changed "connection header" to "connection preface" to avoid confusion.
-
-
- Added dependency-based stream prioritization.
-
-
- Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
-
-
- Adding missing padding to PUSH_PROMISE.
-
-
- Integrate ALTSVC frame and supporting text.
-
-
- Dropping requirement on "deflate" Content-Encoding.
-
-
- Improving security considerations around use of compression.
-
-
-
-
-
- Adding padding for data frames.
-
-
- Renumbering frame types, error codes, and settings.
-
-
- Adding INADEQUATE_SECURITY error code.
-
-
- Updating TLS usage requirements to 1.2; forbidding TLS compression.
-
-
- Removing extensibility for frames and settings.
-
-
- Changing setting identifier size.
-
-
- Removing the ability to disable flow control.
-
-
- Changing the protocol identification token to "h2".
-
-
- Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
- cases.
-
-
- Allowing split on 0x0 for Cookie.
-
-
- Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
-
-
-
-
-
- Added cookie crumbling for more efficient header compression.
-
-
- Added header field ordering with the value-concatenation mechanism.
-
-
-
-
-
- Marked draft for implementation.
-
-
-
-
-
- Adding definition for CONNECT method.
-
-
- Constraining the use of push to safe, cacheable methods with no request body.
-
-
- Changing from :host to :authority to remove any potential confusion.
-
-
- Adding setting for header compression table size.
-
-
- Adding settings acknowledgement.
-
-
- Removing unnecessary and potentially problematic flags from CONTINUATION.
-
-
- Added denial of service considerations.
-
-
-
-
- Marking the draft ready for implementation.
-
-
- Renumbering END_PUSH_PROMISE flag.
-
-
- Editorial clarifications and changes.
-
-
-
-
-
- Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
-
-
- PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
- zero.
-
-
- Push expanded to allow all safe methods without a request body.
-
-
- Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1
- hop-by-hop header fields.
-
-
- Requiring that intermediaries not forward requests with missing or illegal routing
- :-headers.
-
-
- Clarified requirements around handling different frames after stream close, stream reset
- and GOAWAY.
-
-
- Added more specific prohibitions for sending of different frame types in various stream
- states.
-
-
- Making the last received setting value the effective value.
-
-
- Clarified requirements on TLS version, extension and ciphers.
-
-
-
-
-
- Committed major restructuring atrocities.
-
-
- Added reference to first header compression draft.
-
-
- Added more formal description of frame lifecycle.
-
-
- Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA.
-
-
- Removed HEADERS+PRIORITY, added optional priority to HEADERS frame.
-
-
- Added PRIORITY frame.
-
-
-
-
-
- Added continuations to frames carrying header blocks.
-
-
- Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
- concepts, like cookies.
-
-
- Removed "message".
-
-
- Switched to TLS ALPN from NPN.
-
-
- Editorial changes.
-
-
-
-
-
- Added IANA considerations section for frame types, error codes and settings.
-
-
- Removed data frame compression.
-
-
- Added PUSH_PROMISE.
-
-
- Added globally applicable flags to framing.
-
-
- Removed zlib-based header compression mechanism.
-
-
- Updated references.
-
-
- Clarified stream identifier reuse.
-
-
- Removed CREDENTIALS frame and associated mechanisms.
-
-
- Added advice against naive implementation of flow control.
-
-
- Added session header section.
-
-
- Restructured frame header. Removed distinction between data and control frames.
-
-
- Altered flow control properties to include session-level limits.
-
-
- Added note on cacheability of pushed resources and multiple tenant servers.
-
-
- Changed protocol label form based on discussions.
-
-
-
-
-
- Changed title throughout.
-
-
- Removed section on Incompatibilities with SPDY draft#2.
-
-
- Changed INTERNAL_ERROR on GOAWAY to have a value of 2 .
-
-
- Replaced abstract and introduction.
-
-
- Added section on starting HTTP/2.0, including upgrade mechanism.
-
-
- Removed unused references.
-
-
- Added flow control principles based on .
-
-
-
-
-
- Adopted as base for draft-ietf-httpbis-http2.
-
-
- Updated authors/editors list.
-
-
- Added status note.
-
-
-
-
-
-
-
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
deleted file mode 100644
index 0622923ceb..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2015 The Go Authors.
-// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://go.googlesource.com/go/+/master/LICENSE
-
-package http2
-
-import (
- "flag"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-var (
- extNet = flag.Bool("extnet", false, "do external network tests")
- transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
- insecure = flag.Bool("insecure", false, "insecure TLS dials")
-)
-
-func TestTransportExternal(t *testing.T) {
- if !*extNet {
- t.Skip("skipping external network test")
- }
- req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
- rt := &Transport{
- InsecureTLSDial: *insecure,
- }
- res, err := rt.RoundTrip(req)
- if err != nil {
- t.Fatalf("%v", err)
- }
- res.Write(os.Stdout)
-}
-
-func TestTransport(t *testing.T) {
- const body = "sup"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, body)
- })
- defer st.Close()
-
- tr := &Transport{InsecureTLSDial: true}
- defer tr.CloseIdleConnections()
-
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
-
- t.Logf("Got res: %+v", res)
- if g, w := res.StatusCode, 200; g != w {
- t.Errorf("StatusCode = %v; want %v", g, w)
- }
- if g, w := res.Status, "200 OK"; g != w {
- t.Errorf("Status = %q; want %q", g, w)
- }
- wantHeader := http.Header{
- "Content-Length": []string{"3"},
- "Content-Type": []string{"text/plain; charset=utf-8"},
- }
- if !reflect.DeepEqual(res.Header, wantHeader) {
- t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
- }
- if res.Request != req {
- t.Errorf("Response.Request = %p; want %p", res.Request, req)
- }
- if res.TLS == nil {
-		t.Errorf("Response.TLS = %v; want non-nil", res.TLS)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
-		t.Errorf("Body read: %v", err)
- } else if string(slurp) != body {
- t.Errorf("Body = %q; want %q", slurp, body)
- }
-
-}
-
-func TestTransportReusesConns(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, r.RemoteAddr)
- }, optOnlyServer)
- defer st.Close()
- tr := &Transport{InsecureTLSDial: true}
- defer tr.CloseIdleConnections()
- get := func() string {
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("Body read: %v", err)
- }
- addr := strings.TrimSpace(string(slurp))
- if addr == "" {
- t.Fatalf("didn't get an addr in response")
- }
- return addr
- }
- first := get()
- second := get()
- if first != second {
- t.Errorf("first and second responses were on different connections: %q vs %q", first, second)
- }
-}
-
-func TestTransportAbortClosesPipes(t *testing.T) {
- shutdown := make(chan struct{})
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- w.(http.Flusher).Flush()
- <-shutdown
- },
- optOnlyServer,
- )
- defer st.Close()
- defer close(shutdown) // we must shutdown before st.Close() to avoid hanging
-
- done := make(chan struct{})
- requestMade := make(chan struct{})
- go func() {
- defer close(done)
- tr := &Transport{
- InsecureTLSDial: true,
- }
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- close(requestMade)
- _, err = ioutil.ReadAll(res.Body)
- if err == nil {
- t.Error("expected error from res.Body.Read")
- }
- }()
-
- <-requestMade
- // Now force the serve loop to end, via closing the connection.
- st.closeConn()
- // deadlock? that's a bug.
- select {
- case <-done:
- case <-time.After(3 * time.Second):
- t.Fatal("timeout")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go b/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
deleted file mode 100644
index 07b53e3ae9..0000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2014 The Go Authors.
-// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
-// Licensed under the same terms as Go itself:
-// https://code.google.com/p/go/source/browse/LICENSE
-
-package http2
-
-import (
- "bytes"
- "encoding/xml"
- "flag"
- "fmt"
- "io"
- "os"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "testing"
-)
-
-var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
-
-// The global map of sentence coverage for the http2 spec.
-var defaultSpecCoverage specCoverage
-
-var loadSpecOnce sync.Once
-
-func loadSpec() {
- if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
- panic(err)
- } else {
- defaultSpecCoverage = readSpecCov(f)
- f.Close()
- }
-}
-
-// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
-// "covered" will be included in the report output by TestSpecCoverage.
-func covers(sec, sentences string) {
- loadSpecOnce.Do(loadSpec)
- defaultSpecCoverage.cover(sec, sentences)
-}
-
-type specPart struct {
- section string
- sentence string
-}
-
-func (ss specPart) Less(oo specPart) bool {
- atoi := func(s string) int {
- n, err := strconv.Atoi(s)
- if err != nil {
- panic(err)
- }
- return n
- }
- a := strings.Split(ss.section, ".")
- b := strings.Split(oo.section, ".")
- for len(a) > 0 {
- if len(b) == 0 {
- return false
- }
- x, y := atoi(a[0]), atoi(b[0])
- if x == y {
- a, b = a[1:], b[1:]
- continue
- }
- return x < y
- }
- if len(b) > 0 {
- return true
- }
- return false
-}
-
-type bySpecSection []specPart
-
-func (a bySpecSection) Len() int { return len(a) }
-func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
-func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type specCoverage struct {
- coverage map[specPart]bool
- d *xml.Decoder
-}
-
-func joinSection(sec []int) string {
- s := fmt.Sprintf("%d", sec[0])
- for _, n := range sec[1:] {
- s = fmt.Sprintf("%s.%d", s, n)
- }
- return s
-}
-
-func (sc specCoverage) readSection(sec []int) {
- var (
- buf = new(bytes.Buffer)
- sub = 0
- )
- for {
- tk, err := sc.d.Token()
- if err != nil {
- if err == io.EOF {
- return
- }
- panic(err)
- }
- switch v := tk.(type) {
- case xml.StartElement:
- if skipElement(v) {
- if err := sc.d.Skip(); err != nil {
- panic(err)
- }
- if v.Name.Local == "section" {
- sub++
- }
- break
- }
- switch v.Name.Local {
- case "section":
- sub++
- sc.readSection(append(sec, sub))
- case "xref":
- buf.Write(sc.readXRef(v))
- }
- case xml.CharData:
- if len(sec) == 0 {
- break
- }
- buf.Write(v)
- case xml.EndElement:
- if v.Name.Local == "section" {
- sc.addSentences(joinSection(sec), buf.String())
- return
- }
- }
- }
-}
-
-func (sc specCoverage) readXRef(se xml.StartElement) []byte {
- var b []byte
- for {
- tk, err := sc.d.Token()
- if err != nil {
- panic(err)
- }
- switch v := tk.(type) {
- case xml.CharData:
- if b != nil {
- panic("unexpected CharData")
- }
- b = []byte(string(v))
- case xml.EndElement:
- if v.Name.Local != "xref" {
-				panic("expected </xref>")
- }
- if b != nil {
- return b
- }
- sig := attrSig(se)
- switch sig {
- case "target":
- return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
- case "fmt-of,rel,target", "fmt-,,rel,target":
- return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
- case "fmt-of,sec,target", "fmt-,,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
- case "fmt-of,rel,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
- default:
- panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
- }
- default:
- panic(fmt.Sprintf("unexpected tag %q", v))
- }
- }
-}
-
-var skipAnchor = map[string]bool{
- "intro": true,
- "Overview": true,
-}
-
-var skipTitle = map[string]bool{
- "Acknowledgements": true,
- "Change Log": true,
- "Document Organization": true,
- "Conventions and Terminology": true,
-}
-
-func skipElement(s xml.StartElement) bool {
- switch s.Name.Local {
- case "artwork":
- return true
- case "section":
- for _, attr := range s.Attr {
- switch attr.Name.Local {
- case "anchor":
- if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
- return true
- }
- case "title":
- if skipTitle[attr.Value] {
- return true
- }
- }
- }
- }
- return false
-}
-
-func readSpecCov(r io.Reader) specCoverage {
- sc := specCoverage{
- coverage: map[specPart]bool{},
- d: xml.NewDecoder(r)}
- sc.readSection(nil)
- return sc
-}
-
-func (sc specCoverage) addSentences(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- sc.coverage[specPart{sec, s}] = false
- }
-}
-
-func (sc specCoverage) cover(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- p := specPart{sec, s}
- if _, ok := sc.coverage[p]; !ok {
- panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
- }
- sc.coverage[specPart{sec, s}] = true
- }
-
-}
-
-var whitespaceRx = regexp.MustCompile(`\s+`)
-
-func parseSentences(sens string) []string {
- sens = strings.TrimSpace(sens)
- if sens == "" {
- return nil
- }
- ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
- for i, s := range ss {
- s = strings.TrimSpace(s)
- if !strings.HasSuffix(s, ".") {
- s += "."
- }
- ss[i] = s
- }
- return ss
-}
-
-func TestSpecParseSentences(t *testing.T) {
- tests := []struct {
- ss string
- want []string
- }{
- {"Sentence 1. Sentence 2.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- }},
- {"Sentence 1. \nSentence 2.\tSentence 3.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- "Sentence 3.",
- }},
- }
-
- for i, tt := range tests {
- got := parseSentences(tt.ss)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("%d: got = %q, want %q", i, got, tt.want)
- }
- }
-}
-
-func TestSpecCoverage(t *testing.T) {
- if !*coverSpec {
- t.Skip()
- }
-
- loadSpecOnce.Do(loadSpec)
-
- var (
- list []specPart
- cv = defaultSpecCoverage.coverage
- total = len(cv)
- complete = 0
- )
-
- for sp, touched := range defaultSpecCoverage.coverage {
- if touched {
- complete++
- } else {
- list = append(list, sp)
- }
- }
- sort.Stable(bySpecSection(list))
-
- if testing.Short() && len(list) > 5 {
- list = list[:5]
- }
-
- for _, p := range list {
- t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
- }
-
-	t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
-}
-
-func attrSig(se xml.StartElement) string {
- var names []string
- for _, attr := range se.Attr {
- if attr.Name.Local == "fmt" {
- names = append(names, "fmt-"+attr.Value)
- } else {
- names = append(names, attr.Name.Local)
- }
- }
- sort.Strings(names)
- return strings.Join(names, ",")
-}
-
-func attrValue(se xml.StartElement, attr string) string {
- for _, a := range se.Attr {
- if a.Name.Local == attr {
- return a.Value
- }
- }
- panic("unknown attribute " + attr)
-}
-
-func TestSpecPartLess(t *testing.T) {
- tests := []struct {
- sec1, sec2 string
- want bool
- }{
- {"6.2.1", "6.2", false},
- {"6.2", "6.2.1", true},
- {"6.10", "6.10.1", true},
- {"6.10", "6.1.1", false}, // 10, not 1
- {"6.1", "6.1", false}, // equal, so not less
- }
- for _, tt := range tests {
- got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
- if got != tt.want {
- t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
deleted file mode 100644
index 8784187cde..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
+++ /dev/null
@@ -1,2071 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "math/rand"
- "reflect"
- "runtime/debug"
- "strings"
- "testing"
- "time"
-
- . "github.com/golang/protobuf/proto"
- . "github.com/golang/protobuf/proto/testdata"
-)
-
-var globalO *Buffer
-
-func old() *Buffer {
- if globalO == nil {
- globalO = NewBuffer(nil)
- }
- globalO.Reset()
- return globalO
-}
-
-func equalbytes(b1, b2 []byte, t *testing.T) {
- if len(b1) != len(b2) {
-		t.Errorf("wrong lengths: %d != %d", len(b1), len(b2))
- return
- }
- for i := 0; i < len(b1); i++ {
- if b1[i] != b2[i] {
- t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
- }
- }
-}
-
-func initGoTestField() *GoTestField {
- f := new(GoTestField)
- f.Label = String("label")
- f.Type = String("type")
- return f
-}
-
-// These are all structurally equivalent but the tag numbers differ.
-// (It's remarkable that required, optional, and repeated all have
-// 8 letters.)
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
- return &GoTest_RequiredGroup{
- RequiredField: String("required"),
- }
-}
-
-func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
- return &GoTest_OptionalGroup{
- RequiredField: String("optional"),
- }
-}
-
-func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
- return &GoTest_RepeatedGroup{
- RequiredField: String("repeated"),
- }
-}
-
-func initGoTest(setdefaults bool) *GoTest {
- pb := new(GoTest)
- if setdefaults {
- pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
- pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
- pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
- pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
- pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
- pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
- pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
- pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
- pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
- pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
- pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
- pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
- pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
- }
-
- pb.Kind = GoTest_TIME.Enum()
- pb.RequiredField = initGoTestField()
- pb.F_BoolRequired = Bool(true)
- pb.F_Int32Required = Int32(3)
- pb.F_Int64Required = Int64(6)
- pb.F_Fixed32Required = Uint32(32)
- pb.F_Fixed64Required = Uint64(64)
- pb.F_Uint32Required = Uint32(3232)
- pb.F_Uint64Required = Uint64(6464)
- pb.F_FloatRequired = Float32(3232)
- pb.F_DoubleRequired = Float64(6464)
- pb.F_StringRequired = String("string")
- pb.F_BytesRequired = []byte("bytes")
- pb.F_Sint32Required = Int32(-32)
- pb.F_Sint64Required = Int64(-64)
- pb.Requiredgroup = initGoTest_RequiredGroup()
-
- return pb
-}
-
-func fail(msg string, b *bytes.Buffer, s string, t *testing.T) {
- data := b.Bytes()
- ld := len(data)
- ls := len(s) / 2
-
- fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls)
-
- // find the interesting spot - n
- n := ls
- if ld < ls {
- n = ld
- }
- j := 0
- for i := 0; i < n; i++ {
- bs := hex(s[j])*16 + hex(s[j+1])
- j += 2
- if data[i] == bs {
- continue
- }
- n = i
- break
- }
- l := n - 10
- if l < 0 {
- l = 0
- }
- h := n + 10
-
- // find the interesting spot - n
- fmt.Printf("is[%d]:", l)
- for i := l; i < h; i++ {
- if i >= ld {
- fmt.Printf(" --")
- continue
- }
- fmt.Printf(" %.2x", data[i])
- }
- fmt.Printf("\n")
-
- fmt.Printf("sb[%d]:", l)
- for i := l; i < h; i++ {
- if i >= ls {
- fmt.Printf(" --")
- continue
- }
- bs := hex(s[j])*16 + hex(s[j+1])
- j += 2
- fmt.Printf(" %.2x", bs)
- }
- fmt.Printf("\n")
-
- t.Fail()
-
- // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes())
- // Print the output in a partially-decoded format; can
- // be helpful when updating the test. It produces the output
- // that is pasted, with minor edits, into the argument to verify().
- // data := b.Bytes()
- // nesting := 0
- // for b.Len() > 0 {
- // start := len(data) - b.Len()
- // var u uint64
- // u, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on varint:", err)
- // return
- // }
- // wire := u & 0x7
- // tag := u >> 3
- // switch wire {
- // case WireVarint:
- // v, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on varint:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireFixed32:
- // v, err := DecodeFixed32(b)
- // if err != nil {
- // fmt.Printf("decode error on fixed32:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireFixed64:
- // v, err := DecodeFixed64(b)
- // if err != nil {
- // fmt.Printf("decode error on fixed64:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireBytes:
- // nb, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on bytes:", err)
- // return
- // }
- // after_tag := len(data) - b.Len()
- // str := make([]byte, nb)
- // _, err = b.Read(str)
- // if err != nil {
- // fmt.Printf("decode error on bytes:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n",
- // data[start:after_tag], str, tag, wire)
- // case WireStartGroup:
- // nesting++
- // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n",
- // data[start:len(data)-b.Len()], tag, nesting)
- // case WireEndGroup:
- // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n",
- // data[start:len(data)-b.Len()], tag, nesting)
- // nesting--
- // default:
- // fmt.Printf("unrecognized wire type %d\n", wire)
- // return
- // }
- // }
-}
-
-func hex(c uint8) uint8 {
- if '0' <= c && c <= '9' {
- return c - '0'
- }
- if 'a' <= c && c <= 'f' {
- return 10 + c - 'a'
- }
- if 'A' <= c && c <= 'F' {
- return 10 + c - 'A'
- }
- return 0
-}
-
-func equal(b []byte, s string, t *testing.T) bool {
- if 2*len(b) != len(s) {
- // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
- fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
- return false
- }
- for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
- x := hex(s[j])*16 + hex(s[j+1])
- if b[i] != x {
- // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
- fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
- return false
- }
- }
- return true
-}
-
-func overify(t *testing.T, pb *GoTest, expected string) {
- o := old()
- err := o.Marshal(pb)
- if err != nil {
- fmt.Printf("overify marshal-1 err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("expected = %s", expected)
- }
- if !equal(o.Bytes(), expected, t) {
- o.DebugPrint("overify neq 1", o.Bytes())
- t.Fatalf("expected = %s", expected)
- }
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- err = o.Unmarshal(pbd)
- if err != nil {
- t.Fatalf("overify unmarshal err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
- o.Reset()
- err = o.Marshal(pbd)
- if err != nil {
- t.Errorf("overify marshal-2 err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
- if !equal(o.Bytes(), expected, t) {
- o.DebugPrint("overify neq 2", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
-}
-
-// Simple tests for numeric encode/decode primitives (varint, etc.)
-func TestNumericPrimitives(t *testing.T) {
- for i := uint64(0); i < 1e6; i += 111 {
- o := old()
- if o.EncodeVarint(i) != nil {
- t.Error("EncodeVarint")
- break
- }
- x, e := o.DecodeVarint()
- if e != nil {
- t.Fatal("DecodeVarint")
- }
- if x != i {
- t.Fatal("varint decode fail:", i, x)
- }
-
- o = old()
- if o.EncodeFixed32(i) != nil {
- t.Fatal("encFixed32")
- }
- x, e = o.DecodeFixed32()
- if e != nil {
- t.Fatal("decFixed32")
- }
- if x != i {
- t.Fatal("fixed32 decode fail:", i, x)
- }
-
- o = old()
- if o.EncodeFixed64(i*1234567) != nil {
- t.Error("encFixed64")
- break
- }
- x, e = o.DecodeFixed64()
- if e != nil {
- t.Error("decFixed64")
- break
- }
- if x != i*1234567 {
- t.Error("fixed64 decode fail:", i*1234567, x)
- break
- }
-
- o = old()
- i32 := int32(i - 12345)
- if o.EncodeZigzag32(uint64(i32)) != nil {
- t.Fatal("EncodeZigzag32")
- }
- x, e = o.DecodeZigzag32()
- if e != nil {
- t.Fatal("DecodeZigzag32")
- }
- if x != uint64(uint32(i32)) {
- t.Fatal("zigzag32 decode fail:", i32, x)
- }
-
- o = old()
- i64 := int64(i - 12345)
- if o.EncodeZigzag64(uint64(i64)) != nil {
- t.Fatal("EncodeZigzag64")
- }
- x, e = o.DecodeZigzag64()
- if e != nil {
- t.Fatal("DecodeZigzag64")
- }
- if x != uint64(i64) {
- t.Fatal("zigzag64 decode fail:", i64, x)
- }
- }
-}
-
-// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.
-type fakeMarshaler struct {
- b []byte
- err error
-}
-
-func (f fakeMarshaler) Marshal() ([]byte, error) {
- return f.b, f.err
-}
-
-func (f fakeMarshaler) String() string {
- return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err)
-}
-
-func (f fakeMarshaler) ProtoMessage() {}
-
-func (f fakeMarshaler) Reset() {}
-
-// Simple tests for proto messages that implement the Marshaler interface.
-func TestMarshalerEncoding(t *testing.T) {
- tests := []struct {
- name string
- m Message
- want []byte
- wantErr error
- }{
- {
- name: "Marshaler that fails",
- m: fakeMarshaler{
- err: errors.New("some marshal err"),
- b: []byte{5, 6, 7},
- },
- // Since there's an error, nothing should be written to buffer.
- want: nil,
- wantErr: errors.New("some marshal err"),
- },
- {
- name: "Marshaler that succeeds",
- m: fakeMarshaler{
- b: []byte{0, 1, 2, 3, 4, 127, 255},
- },
- want: []byte{0, 1, 2, 3, 4, 127, 255},
- wantErr: nil,
- },
- }
- for _, test := range tests {
- b := NewBuffer(nil)
- err := b.Marshal(test.m)
- if !reflect.DeepEqual(test.wantErr, err) {
- t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr)
- }
- if !reflect.DeepEqual(test.want, b.Bytes()) {
- t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
- }
- }
-}
-
-// Simple tests for bytes
-func TestBytesPrimitives(t *testing.T) {
- o := old()
- bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
- if o.EncodeRawBytes(bytes) != nil {
- t.Error("EncodeRawBytes")
- }
- decb, e := o.DecodeRawBytes(false)
- if e != nil {
- t.Error("DecodeRawBytes")
- }
- equalbytes(bytes, decb, t)
-}
-
-// Simple tests for strings
-func TestStringPrimitives(t *testing.T) {
- o := old()
- s := "now is the time"
- if o.EncodeStringBytes(s) != nil {
- t.Error("enc_string")
- }
- decs, e := o.DecodeStringBytes()
- if e != nil {
- t.Error("dec_string")
- }
- if s != decs {
- t.Error("string encode/decode fail:", s, decs)
- }
-}
-
-// Do we catch the "required bit not set" case?
-func TestRequiredBit(t *testing.T) {
- o := old()
- pb := new(GoTest)
- err := o.Marshal(pb)
- if err == nil {
- t.Error("did not catch missing required fields")
- } else if strings.Index(err.Error(), "Kind") < 0 {
- t.Error("wrong error type:", err)
- }
-}
-
-// Check that all fields are nil.
-// Clearly silly, and a residue from a more interesting test with an earlier,
-// different initialization property, but it once caught a compiler bug so
-// it lives.
-func checkInitialized(pb *GoTest, t *testing.T) {
- if pb.F_BoolDefaulted != nil {
- t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
- }
- if pb.F_Int32Defaulted != nil {
- t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
- }
- if pb.F_Int64Defaulted != nil {
- t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
- }
- if pb.F_Fixed32Defaulted != nil {
- t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
- }
- if pb.F_Fixed64Defaulted != nil {
- t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
- }
- if pb.F_Uint32Defaulted != nil {
- t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
- }
- if pb.F_Uint64Defaulted != nil {
- t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
- }
- if pb.F_FloatDefaulted != nil {
- t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
- }
- if pb.F_DoubleDefaulted != nil {
- t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
- }
- if pb.F_StringDefaulted != nil {
- t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
- }
- if pb.F_BytesDefaulted != nil {
- t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
- }
- if pb.F_Sint32Defaulted != nil {
- t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
- }
- if pb.F_Sint64Defaulted != nil {
- t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
- }
-}
-
-// Does Reset() reset?
-func TestReset(t *testing.T) {
- pb := initGoTest(true)
- // muck with some values
- pb.F_BoolDefaulted = Bool(false)
- pb.F_Int32Defaulted = Int32(237)
- pb.F_Int64Defaulted = Int64(12346)
- pb.F_Fixed32Defaulted = Uint32(32000)
- pb.F_Fixed64Defaulted = Uint64(666)
- pb.F_Uint32Defaulted = Uint32(323232)
- pb.F_Uint64Defaulted = nil
- pb.F_FloatDefaulted = nil
- pb.F_DoubleDefaulted = Float64(0)
- pb.F_StringDefaulted = String("gotcha")
- pb.F_BytesDefaulted = []byte("asdfasdf")
- pb.F_Sint32Defaulted = Int32(123)
- pb.F_Sint64Defaulted = Int64(789)
- pb.Reset()
- checkInitialized(pb, t)
-}
-
-// All required fields set, no defaults provided.
-func TestEncodeDecode1(t *testing.T) {
- pb := initGoTest(false)
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 0x20
- "714000000000000000"+ // field 14, encoding 1, value 0x40
- "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
- "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
- "b304"+ // field 70, encoding 3, start group
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // field 70, encoding 4, end group
- "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f") // field 103, encoding 0, 0x7f zigzag64
-}
-
-// All required fields set, defaults provided.
-func TestEncodeDecode2(t *testing.T) {
- pb := initGoTest(true)
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All default fields set to their default value by hand
-func TestEncodeDecode3(t *testing.T) {
- pb := initGoTest(false)
- pb.F_BoolDefaulted = Bool(true)
- pb.F_Int32Defaulted = Int32(32)
- pb.F_Int64Defaulted = Int64(64)
- pb.F_Fixed32Defaulted = Uint32(320)
- pb.F_Fixed64Defaulted = Uint64(640)
- pb.F_Uint32Defaulted = Uint32(3200)
- pb.F_Uint64Defaulted = Uint64(6400)
- pb.F_FloatDefaulted = Float32(314159)
- pb.F_DoubleDefaulted = Float64(271828)
- pb.F_StringDefaulted = String("hello, \"world!\"\n")
- pb.F_BytesDefaulted = []byte("Bignose")
- pb.F_Sint32Defaulted = Int32(-32)
- pb.F_Sint64Defaulted = Int64(-64)
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, defaults provided, all non-defaulted optional fields have values.
-func TestEncodeDecode4(t *testing.T) {
- pb := initGoTest(true)
- pb.Table = String("hello")
- pb.Param = Int32(7)
- pb.OptionalField = initGoTestField()
- pb.F_BoolOptional = Bool(true)
- pb.F_Int32Optional = Int32(32)
- pb.F_Int64Optional = Int64(64)
- pb.F_Fixed32Optional = Uint32(3232)
- pb.F_Fixed64Optional = Uint64(6464)
- pb.F_Uint32Optional = Uint32(323232)
- pb.F_Uint64Optional = Uint64(646464)
- pb.F_FloatOptional = Float32(32.)
- pb.F_DoubleOptional = Float64(64.)
- pb.F_StringOptional = String("hello")
- pb.F_BytesOptional = []byte("Bignose")
- pb.F_Sint32Optional = Int32(-32)
- pb.F_Sint64Optional = Int64(-64)
- pb.Optionalgroup = initGoTest_OptionalGroup()
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
- "1807"+ // field 3, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "f00101"+ // field 30, encoding 0, value 1
- "f80120"+ // field 31, encoding 0, value 32
- "800240"+ // field 32, encoding 0, value 64
- "8d02a00c0000"+ // field 33, encoding 5, value 3232
- "91024019000000000000"+ // field 34, encoding 1, value 6464
- "9802a0dd13"+ // field 35, encoding 0, value 323232
- "a002c0ba27"+ // field 36, encoding 0, value 646464
- "ad0200000042"+ // field 37, encoding 5, value 32.0
- "b1020000000000005040"+ // field 38, encoding 1, value 64.0
- "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "d305"+ // start group field 90 level 1
- "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
- "d405"+ // end group field 90 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
- "f0123f"+ // field 302, encoding 0, value 63
- "f8127f"+ // field 303, encoding 0, value 127
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, defaults provided, all repeated fields given two values.
-func TestEncodeDecode5(t *testing.T) {
- pb := initGoTest(true)
- pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
- pb.F_BoolRepeated = []bool{false, true}
- pb.F_Int32Repeated = []int32{32, 33}
- pb.F_Int64Repeated = []int64{64, 65}
- pb.F_Fixed32Repeated = []uint32{3232, 3333}
- pb.F_Fixed64Repeated = []uint64{6464, 6565}
- pb.F_Uint32Repeated = []uint32{323232, 333333}
- pb.F_Uint64Repeated = []uint64{646464, 656565}
- pb.F_FloatRepeated = []float32{32., 33.}
- pb.F_DoubleRepeated = []float64{64., 65.}
- pb.F_StringRepeated = []string{"hello", "sailor"}
- pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
- pb.F_Sint32Repeated = []int32{32, -32}
- pb.F_Sint64Repeated = []int64{64, -64}
- pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
- "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "a00100"+ // field 20, encoding 0, value 0
- "a00101"+ // field 20, encoding 0, value 1
- "a80120"+ // field 21, encoding 0, value 32
- "a80121"+ // field 21, encoding 0, value 33
- "b00140"+ // field 22, encoding 0, value 64
- "b00141"+ // field 22, encoding 0, value 65
- "bd01a00c0000"+ // field 23, encoding 5, value 3232
- "bd01050d0000"+ // field 23, encoding 5, value 3333
- "c1014019000000000000"+ // field 24, encoding 1, value 6464
- "c101a519000000000000"+ // field 24, encoding 1, value 6565
- "c801a0dd13"+ // field 25, encoding 0, value 323232
- "c80195ac14"+ // field 25, encoding 0, value 333333
- "d001c0ba27"+ // field 26, encoding 0, value 646464
- "d001b58928"+ // field 26, encoding 0, value 656565
- "dd0100000042"+ // field 27, encoding 5, value 32.0
- "dd0100000442"+ // field 27, encoding 5, value 33.0
- "e1010000000000005040"+ // field 28, encoding 1, value 64.0
- "e1010000000000405040"+ // field 28, encoding 1, value 65.0
- "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
- "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "8305"+ // start group field 80 level 1
- "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
- "8405"+ // end group field 80 level 1
- "8305"+ // start group field 80 level 1
- "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
- "8405"+ // end group field 80 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
- "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
- "d00c40"+ // field 202, encoding 0, value 32
- "d00c3f"+ // field 202, encoding 0, value -32
- "d80c8001"+ // field 203, encoding 0, value 64
- "d80c7f"+ // field 203, encoding 0, value -64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, all packed repeated fields given two values.
-func TestEncodeDecode6(t *testing.T) {
- pb := initGoTest(false)
- pb.F_BoolRepeatedPacked = []bool{false, true}
- pb.F_Int32RepeatedPacked = []int32{32, 33}
- pb.F_Int64RepeatedPacked = []int64{64, 65}
- pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
- pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
- pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
- pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
- pb.F_FloatRepeatedPacked = []float32{32., 33.}
- pb.F_DoubleRepeatedPacked = []float64{64., 65.}
- pb.F_Sint32RepeatedPacked = []int32{32, -32}
- pb.F_Sint64RepeatedPacked = []int64{64, -64}
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
- "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
- "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
- "aa0308"+ // field 53, encoding 2, 8 bytes
- "a00c0000050d0000"+ // value 3232, value 3333
- "b20310"+ // field 54, encoding 2, 16 bytes
- "4019000000000000a519000000000000"+ // value 6464, value 6565
- "ba0306"+ // field 55, encoding 2, 6 bytes
- "a0dd1395ac14"+ // value 323232, value 333333
- "c20306"+ // field 56, encoding 2, 6 bytes
- "c0ba27b58928"+ // value 646464, value 656565
- "ca0308"+ // field 57, encoding 2, 8 bytes
- "0000004200000442"+ // value 32.0, value 33.0
- "d20310"+ // field 58, encoding 2, 16 bytes
- "00000000000050400000000000405040"+ // value 64.0, value 65.0
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "b21f02"+ // field 502, encoding 2, 2 bytes
- "403f"+ // value 32, value -32
- "ba1f03"+ // field 503, encoding 2, 3 bytes
- "80017f") // value 64, value -64
-}
-
-// Test that we can encode empty bytes fields.
-func TestEncodeDecodeBytes1(t *testing.T) {
- pb := initGoTest(false)
-
- // Create our bytes
- pb.F_BytesRequired = []byte{}
- pb.F_BytesRepeated = [][]byte{{}}
- pb.F_BytesOptional = []byte{}
-
- d, err := Marshal(pb)
- if err != nil {
- t.Error(err)
- }
-
- pbd := new(GoTest)
- if err := Unmarshal(d, pbd); err != nil {
- t.Error(err)
- }
-
- if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
- t.Error("required empty bytes field is incorrect")
- }
- if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
- t.Error("repeated empty bytes field is incorrect")
- }
- if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
- t.Error("optional empty bytes field is incorrect")
- }
-}
-
-// Test that we encode nil-valued fields of a repeated bytes field correctly.
-// Since entries in a repeated field cannot be nil, nil must mean empty value.
-func TestEncodeDecodeBytes2(t *testing.T) {
- pb := initGoTest(false)
-
- // Create our bytes
- pb.F_BytesRepeated = [][]byte{nil}
-
- d, err := Marshal(pb)
- if err != nil {
- t.Error(err)
- }
-
- pbd := new(GoTest)
- if err := Unmarshal(d, pbd); err != nil {
- t.Error(err)
- }
-
- if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
- t.Error("Unexpected value for repeated bytes field")
- }
-}
-
-// All required fields set, defaults provided, all repeated fields given two values.
-func TestSkippingUnrecognizedFields(t *testing.T) {
- o := old()
- pb := initGoTestField()
-
- // Marshal it normally.
- o.Marshal(pb)
-
- // Now new a GoSkipTest record.
- skip := &GoSkipTest{
- SkipInt32: Int32(32),
- SkipFixed32: Uint32(3232),
- SkipFixed64: Uint64(6464),
- SkipString: String("skipper"),
- Skipgroup: &GoSkipTest_SkipGroup{
- GroupInt32: Int32(75),
- GroupString: String("wxyz"),
- },
- }
-
- // Marshal it into same buffer.
- o.Marshal(skip)
-
- pbd := new(GoTestField)
- o.Unmarshal(pbd)
-
- // The __unrecognized field should be a marshaling of GoSkipTest
- skipd := new(GoSkipTest)
-
- o.SetBuf(pbd.XXX_unrecognized)
- o.Unmarshal(skipd)
-
- if *skipd.SkipInt32 != *skip.SkipInt32 {
- t.Error("skip int32", skipd.SkipInt32)
- }
- if *skipd.SkipFixed32 != *skip.SkipFixed32 {
- t.Error("skip fixed32", skipd.SkipFixed32)
- }
- if *skipd.SkipFixed64 != *skip.SkipFixed64 {
- t.Error("skip fixed64", skipd.SkipFixed64)
- }
- if *skipd.SkipString != *skip.SkipString {
- t.Error("skip string", *skipd.SkipString)
- }
- if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
- t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
- }
- if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
- t.Error("skip group string", *skipd.Skipgroup.GroupString)
- }
-}
-
-// Check that unrecognized fields of a submessage are preserved.
-func TestSubmessageUnrecognizedFields(t *testing.T) {
- nm := &NewMessage{
- Nested: &NewMessage_Nested{
- Name: String("Nigel"),
- FoodGroup: String("carbs"),
- },
- }
- b, err := Marshal(nm)
- if err != nil {
- t.Fatalf("Marshal of NewMessage: %v", err)
- }
-
- // Unmarshal into an OldMessage.
- om := new(OldMessage)
- if err := Unmarshal(b, om); err != nil {
- t.Fatalf("Unmarshal to OldMessage: %v", err)
- }
- exp := &OldMessage{
- Nested: &OldMessage_Nested{
- Name: String("Nigel"),
- // normal protocol buffer users should not do this
- XXX_unrecognized: []byte("\x12\x05carbs"),
- },
- }
- if !Equal(om, exp) {
- t.Errorf("om = %v, want %v", om, exp)
- }
-
- // Clone the OldMessage.
- om = Clone(om).(*OldMessage)
- if !Equal(om, exp) {
- t.Errorf("Clone(om) = %v, want %v", om, exp)
- }
-
- // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
- if b, err = Marshal(om); err != nil {
- t.Fatalf("Marshal of OldMessage: %v", err)
- }
- t.Logf("Marshal(%v) -> %q", om, b)
- nm2 := new(NewMessage)
- if err := Unmarshal(b, nm2); err != nil {
- t.Fatalf("Unmarshal to NewMessage: %v", err)
- }
- if !Equal(nm, nm2) {
- t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
- }
-}
-
-// Check that an int32 field can be upgraded to an int64 field.
-func TestNegativeInt32(t *testing.T) {
- om := &OldMessage{
- Num: Int32(-1),
- }
- b, err := Marshal(om)
- if err != nil {
- t.Fatalf("Marshal of OldMessage: %v", err)
- }
-
- // Check the size. It should be 11 bytes;
- // 1 for the field/wire type, and 10 for the negative number.
- if len(b) != 11 {
- t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
- }
-
- // Unmarshal into a NewMessage.
- nm := new(NewMessage)
- if err := Unmarshal(b, nm); err != nil {
- t.Fatalf("Unmarshal to NewMessage: %v", err)
- }
- want := &NewMessage{
- Num: Int64(-1),
- }
- if !Equal(nm, want) {
- t.Errorf("nm = %v, want %v", nm, want)
- }
-}
-
-// Check that we can grow an array (repeated field) to have many elements.
-// This test doesn't depend only on our encoding; for variety, it makes sure
-// we create, encode, and decode the correct contents explicitly. It's therefore
-// a bit messier.
-// This test also uses (and hence tests) the Marshal/Unmarshal functions
-// instead of the methods.
-func TestBigRepeated(t *testing.T) {
- pb := initGoTest(true)
-
- // Create the arrays
- const N = 50 // Internally the library starts much smaller.
- pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
- pb.F_Sint64Repeated = make([]int64, N)
- pb.F_Sint32Repeated = make([]int32, N)
- pb.F_BytesRepeated = make([][]byte, N)
- pb.F_StringRepeated = make([]string, N)
- pb.F_DoubleRepeated = make([]float64, N)
- pb.F_FloatRepeated = make([]float32, N)
- pb.F_Uint64Repeated = make([]uint64, N)
- pb.F_Uint32Repeated = make([]uint32, N)
- pb.F_Fixed64Repeated = make([]uint64, N)
- pb.F_Fixed32Repeated = make([]uint32, N)
- pb.F_Int64Repeated = make([]int64, N)
- pb.F_Int32Repeated = make([]int32, N)
- pb.F_BoolRepeated = make([]bool, N)
- pb.RepeatedField = make([]*GoTestField, N)
-
- // Fill in the arrays with checkable values.
- igtf := initGoTestField()
- igtrg := initGoTest_RepeatedGroup()
- for i := 0; i < N; i++ {
- pb.Repeatedgroup[i] = igtrg
- pb.F_Sint64Repeated[i] = int64(i)
- pb.F_Sint32Repeated[i] = int32(i)
- s := fmt.Sprint(i)
- pb.F_BytesRepeated[i] = []byte(s)
- pb.F_StringRepeated[i] = s
- pb.F_DoubleRepeated[i] = float64(i)
- pb.F_FloatRepeated[i] = float32(i)
- pb.F_Uint64Repeated[i] = uint64(i)
- pb.F_Uint32Repeated[i] = uint32(i)
- pb.F_Fixed64Repeated[i] = uint64(i)
- pb.F_Fixed32Repeated[i] = uint32(i)
- pb.F_Int64Repeated[i] = int64(i)
- pb.F_Int32Repeated[i] = int32(i)
- pb.F_BoolRepeated[i] = i%2 == 0
- pb.RepeatedField[i] = igtf
- }
-
- // Marshal.
- buf, _ := Marshal(pb)
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- Unmarshal(buf, pbd)
-
- // Check the checkable values
- for i := uint64(0); i < N; i++ {
- if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
- t.Error("pbd.Repeatedgroup bad")
- }
- var x uint64
- x = uint64(pbd.F_Sint64Repeated[i])
- if x != i {
- t.Error("pbd.F_Sint64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Sint32Repeated[i])
- if x != i {
- t.Error("pbd.F_Sint32Repeated bad", x, i)
- }
- s := fmt.Sprint(i)
- equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
- if pbd.F_StringRepeated[i] != s {
- t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
- }
- x = uint64(pbd.F_DoubleRepeated[i])
- if x != i {
- t.Error("pbd.F_DoubleRepeated bad", x, i)
- }
- x = uint64(pbd.F_FloatRepeated[i])
- if x != i {
- t.Error("pbd.F_FloatRepeated bad", x, i)
- }
- x = pbd.F_Uint64Repeated[i]
- if x != i {
- t.Error("pbd.F_Uint64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Uint32Repeated[i])
- if x != i {
- t.Error("pbd.F_Uint32Repeated bad", x, i)
- }
- x = pbd.F_Fixed64Repeated[i]
- if x != i {
- t.Error("pbd.F_Fixed64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Fixed32Repeated[i])
- if x != i {
- t.Error("pbd.F_Fixed32Repeated bad", x, i)
- }
- x = uint64(pbd.F_Int64Repeated[i])
- if x != i {
- t.Error("pbd.F_Int64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Int32Repeated[i])
- if x != i {
- t.Error("pbd.F_Int32Repeated bad", x, i)
- }
- if pbd.F_BoolRepeated[i] != (i%2 == 0) {
- t.Error("pbd.F_BoolRepeated bad", x, i)
- }
- if pbd.RepeatedField[i] == nil { // TODO: more checking?
- t.Error("pbd.RepeatedField bad")
- }
- }
-}
-
-// Verify we give a useful message when decoding to the wrong structure type.
-func TestTypeMismatch(t *testing.T) {
- pb1 := initGoTest(true)
-
- // Marshal
- o := old()
- o.Marshal(pb1)
-
- // Now Unmarshal it to the wrong type.
- pb2 := initGoTestField()
- err := o.Unmarshal(pb2)
- if err == nil {
- t.Error("expected error, got no error")
- } else if !strings.Contains(err.Error(), "bad wiretype") {
- t.Error("expected bad wiretype error, got", err)
- }
-}
-
-func encodeDecode(t *testing.T, in, out Message, msg string) {
- buf, err := Marshal(in)
- if err != nil {
- t.Fatalf("failed marshaling %v: %v", msg, err)
- }
- if err := Unmarshal(buf, out); err != nil {
- t.Fatalf("failed unmarshaling %v: %v", msg, err)
- }
-}
-
-func TestPackedNonPackedDecoderSwitching(t *testing.T) {
- np, p := new(NonPackedTest), new(PackedTest)
-
- // non-packed -> packed
- np.A = []int32{0, 1, 1, 2, 3, 5}
- encodeDecode(t, np, p, "non-packed -> packed")
- if !reflect.DeepEqual(np.A, p.B) {
- t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
- }
-
- // packed -> non-packed
- np.Reset()
- p.B = []int32{3, 1, 4, 1, 5, 9}
- encodeDecode(t, p, np, "packed -> non-packed")
- if !reflect.DeepEqual(p.B, np.A) {
- t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
- }
-}
-
-func TestProto1RepeatedGroup(t *testing.T) {
- pb := &MessageList{
- Message: []*MessageList_Message{
- {
- Name: String("blah"),
- Count: Int32(7),
- },
- // NOTE: pb.Message[1] is a nil
- nil,
- },
- }
-
- o := old()
- err := o.Marshal(pb)
- if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
- t.Fatalf("unexpected or no error when marshaling: %v", err)
- }
-}
-
-// Test that enums work. Checks for a bug introduced by making enums
-// named types instead of int32: newInt32FromUint64 would crash with
-// a type mismatch in reflect.PointTo.
-func TestEnum(t *testing.T) {
- pb := new(GoEnum)
- pb.Foo = FOO_FOO1.Enum()
- o := old()
- if err := o.Marshal(pb); err != nil {
- t.Fatal("error encoding enum:", err)
- }
- pb1 := new(GoEnum)
- if err := o.Unmarshal(pb1); err != nil {
- t.Fatal("error decoding enum:", err)
- }
- if *pb1.Foo != FOO_FOO1 {
- t.Error("expected 7 but got ", *pb1.Foo)
- }
-}
-
-// Enum types have String methods. Check that enum fields can be printed.
-// We don't care what the value actually is, just as long as it doesn't crash.
-func TestPrintingNilEnumFields(t *testing.T) {
- pb := new(GoEnum)
- fmt.Sprintf("%+v", pb)
-}
-
-// Verify that absent required fields cause Marshal/Unmarshal to return errors.
-func TestRequiredFieldEnforcement(t *testing.T) {
- pb := new(GoTestField)
- _, err := Marshal(pb)
- if err == nil {
- t.Error("marshal: expected error, got nil")
- } else if strings.Index(err.Error(), "Label") < 0 {
- t.Errorf("marshal: bad error type: %v", err)
- }
-
- // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
- // so simply counting the required fields is insufficient.
- // field 1, encoding 2, value "hi"
- buf := []byte("\x0A\x02hi\x0A\x02hi")
- err = Unmarshal(buf, pb)
- if err == nil {
- t.Error("unmarshal: expected error, got nil")
- } else if strings.Index(err.Error(), "{Unknown}") < 0 {
- t.Errorf("unmarshal: bad error type: %v", err)
- }
-}
-
-func TestTypedNilMarshal(t *testing.T) {
- // A typed nil should return ErrNil and not crash.
- _, err := Marshal((*GoEnum)(nil))
- if err != ErrNil {
- t.Errorf("Marshal: got err %v, want ErrNil", err)
- }
-}
-
-// A type that implements the Marshaler interface, but is not nillable.
-type nonNillableInt uint64
-
-func (nni nonNillableInt) Marshal() ([]byte, error) {
- return EncodeVarint(uint64(nni)), nil
-}
-
-type NNIMessage struct {
- nni nonNillableInt
-}
-
-func (*NNIMessage) Reset() {}
-func (*NNIMessage) String() string { return "" }
-func (*NNIMessage) ProtoMessage() {}
-
-// A type that implements the Marshaler interface and is nillable.
-type nillableMessage struct {
- x uint64
-}
-
-func (nm *nillableMessage) Marshal() ([]byte, error) {
- return EncodeVarint(nm.x), nil
-}
-
-type NMMessage struct {
- nm *nillableMessage
-}
-
-func (*NMMessage) Reset() {}
-func (*NMMessage) String() string { return "" }
-func (*NMMessage) ProtoMessage() {}
-
-// Verify a type that uses the Marshaler interface, but has a nil pointer.
-func TestNilMarshaler(t *testing.T) {
- // Try a struct with a Marshaler field that is nil.
- // It should be directly marshable.
- nmm := new(NMMessage)
- if _, err := Marshal(nmm); err != nil {
- t.Error("unexpected error marshaling nmm: ", err)
- }
-
- // Try a struct with a Marshaler field that is not nillable.
- nnim := new(NNIMessage)
- nnim.nni = 7
- var _ Marshaler = nnim.nni // verify it is truly a Marshaler
- if _, err := Marshal(nnim); err != nil {
- t.Error("unexpected error marshaling nnim: ", err)
- }
-}
-
-func TestAllSetDefaults(t *testing.T) {
- // Exercise SetDefaults with all scalar field types.
- m := &Defaults{
- // NaN != NaN, so override that here.
- F_Nan: Float32(1.7),
- }
- expected := &Defaults{
- F_Bool: Bool(true),
- F_Int32: Int32(32),
- F_Int64: Int64(64),
- F_Fixed32: Uint32(320),
- F_Fixed64: Uint64(640),
- F_Uint32: Uint32(3200),
- F_Uint64: Uint64(6400),
- F_Float: Float32(314159),
- F_Double: Float64(271828),
- F_String: String(`hello, "world!"` + "\n"),
- F_Bytes: []byte("Bignose"),
- F_Sint32: Int32(-32),
- F_Sint64: Int64(-64),
- F_Enum: Defaults_GREEN.Enum(),
- F_Pinf: Float32(float32(math.Inf(1))),
- F_Ninf: Float32(float32(math.Inf(-1))),
- F_Nan: Float32(1.7),
- StrZero: String(""),
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultsWithSetField(t *testing.T) {
- // Check that a set value is not overridden.
- m := &Defaults{
- F_Int32: Int32(12),
- }
- SetDefaults(m)
- if v := m.GetF_Int32(); v != 12 {
- t.Errorf("m.FInt32 = %v, want 12", v)
- }
-}
-
-func TestSetDefaultsWithSubMessage(t *testing.T) {
- m := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("gopher"),
- },
- }
- expected := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("gopher"),
- Port: Int32(4000),
- },
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
- m := &MyMessage{
- RepInner: []*InnerMessage{{}},
- }
- expected := &MyMessage{
- RepInner: []*InnerMessage{{
- Port: Int32(4000),
- }},
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
- m := &MyMessage{
- Pet: []string{"turtle", "wombat"},
- }
- expected := Clone(m)
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestMaximumTagNumber(t *testing.T) {
- m := &MaxTag{
- LastField: String("natural goat essence"),
- }
- buf, err := Marshal(m)
- if err != nil {
- t.Fatalf("proto.Marshal failed: %v", err)
- }
- m2 := new(MaxTag)
- if err := Unmarshal(buf, m2); err != nil {
- t.Fatalf("proto.Unmarshal failed: %v", err)
- }
- if got, want := m2.GetLastField(), *m.LastField; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
-}
-
-func TestJSON(t *testing.T) {
- m := &MyMessage{
- Count: Int32(4),
- Pet: []string{"bunny", "kitty"},
- Inner: &InnerMessage{
- Host: String("cauchy"),
- },
- Bikeshed: MyMessage_GREEN.Enum(),
- }
- const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
-
- b, err := json.Marshal(m)
- if err != nil {
- t.Fatalf("json.Marshal failed: %v", err)
- }
- s := string(b)
- if s != expected {
- t.Errorf("got %s\nwant %s", s, expected)
- }
-
- received := new(MyMessage)
- if err := json.Unmarshal(b, received); err != nil {
- t.Fatalf("json.Unmarshal failed: %v", err)
- }
- if !Equal(received, m) {
- t.Fatalf("got %s, want %s", received, m)
- }
-
- // Test unmarshalling of JSON with symbolic enum name.
- const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
- received.Reset()
- if err := json.Unmarshal([]byte(old), received); err != nil {
- t.Fatalf("json.Unmarshal failed: %v", err)
- }
- if !Equal(received, m) {
- t.Fatalf("got %s, want %s", received, m)
- }
-}
-
-func TestBadWireType(t *testing.T) {
- b := []byte{7<<3 | 6} // field 7, wire type 6
- pb := new(OtherMessage)
- if err := Unmarshal(b, pb); err == nil {
- t.Errorf("Unmarshal did not fail")
- } else if !strings.Contains(err.Error(), "unknown wire type") {
- t.Errorf("wrong error: %v", err)
- }
-}
-
-func TestBytesWithInvalidLength(t *testing.T) {
- // If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
- b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
- Unmarshal(b, new(MyMessage))
-}
-
-func TestLengthOverflow(t *testing.T) {
- // Overflowing a length should not panic.
- b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
- Unmarshal(b, new(MyMessage))
-}
-
-func TestVarintOverflow(t *testing.T) {
- // Overflowing a 64-bit length should not be allowed.
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
- if err := Unmarshal(b, new(MyMessage)); err == nil {
- t.Fatalf("Overflowed uint64 length without error")
- }
-}
-
-func TestUnmarshalFuzz(t *testing.T) {
- const N = 1000
- seed := time.Now().UnixNano()
- t.Logf("RNG seed is %d", seed)
- rng := rand.New(rand.NewSource(seed))
- buf := make([]byte, 20)
- for i := 0; i < N; i++ {
- for j := range buf {
- buf[j] = byte(rng.Intn(256))
- }
- fuzzUnmarshal(t, buf)
- }
-}
-
-func TestMergeMessages(t *testing.T) {
- pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
- data, err := Marshal(pb)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- pb1 := new(MessageList)
- if err := Unmarshal(data, pb1); err != nil {
- t.Fatalf("first Unmarshal: %v", err)
- }
- if err := Unmarshal(data, pb1); err != nil {
- t.Fatalf("second Unmarshal: %v", err)
- }
- if len(pb1.Message) != 1 {
- t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
- }
-
- pb2 := new(MessageList)
- if err := UnmarshalMerge(data, pb2); err != nil {
- t.Fatalf("first UnmarshalMerge: %v", err)
- }
- if err := UnmarshalMerge(data, pb2); err != nil {
- t.Fatalf("second UnmarshalMerge: %v", err)
- }
- if len(pb2.Message) != 2 {
- t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
- }
-}
-
-func TestExtensionMarshalOrder(t *testing.T) {
- m := &MyMessage{Count: Int(123)}
- if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
-
- // Serialize m several times, and check we get the same bytes each time.
- var orig []byte
- for i := 0; i < 100; i++ {
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if i == 0 {
- orig = b
- continue
- }
- if !bytes.Equal(b, orig) {
- t.Errorf("Bytes differ on attempt #%d", i)
- }
- }
-}
-
-// Many extensions, because small maps might not iterate differently on each iteration.
-var exts = []*ExtensionDesc{
- E_X201,
- E_X202,
- E_X203,
- E_X204,
- E_X205,
- E_X206,
- E_X207,
- E_X208,
- E_X209,
- E_X210,
- E_X211,
- E_X212,
- E_X213,
- E_X214,
- E_X215,
- E_X216,
- E_X217,
- E_X218,
- E_X219,
- E_X220,
- E_X221,
- E_X222,
- E_X223,
- E_X224,
- E_X225,
- E_X226,
- E_X227,
- E_X228,
- E_X229,
- E_X230,
- E_X231,
- E_X232,
- E_X233,
- E_X234,
- E_X235,
- E_X236,
- E_X237,
- E_X238,
- E_X239,
- E_X240,
- E_X241,
- E_X242,
- E_X243,
- E_X244,
- E_X245,
- E_X246,
- E_X247,
- E_X248,
- E_X249,
- E_X250,
-}
-
-func TestMessageSetMarshalOrder(t *testing.T) {
- m := &MyMessageSet{}
- for _, x := range exts {
- if err := SetExtension(m, x, &Empty{}); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- }
-
- buf, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- // Serialize m several times, and check we get the same bytes each time.
- for i := 0; i < 10; i++ {
- b1, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if !bytes.Equal(b1, buf) {
- t.Errorf("Bytes differ on re-Marshal #%d", i)
- }
-
- m2 := &MyMessageSet{}
- if err := Unmarshal(buf, m2); err != nil {
- t.Errorf("Unmarshal: %v", err)
- }
- b2, err := Marshal(m2)
- if err != nil {
- t.Errorf("re-Marshal: %v", err)
- }
- if !bytes.Equal(b2, buf) {
- t.Errorf("Bytes differ on round-trip #%d", i)
- }
- }
-}
-
-func TestUnmarshalMergesMessages(t *testing.T) {
- // If a nested message occurs twice in the input,
- // the fields should be merged when decoding.
- a := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("polhode"),
- Port: Int32(1234),
- },
- }
- aData, err := Marshal(a)
- if err != nil {
- t.Fatalf("Marshal(a): %v", err)
- }
- b := &OtherMessage{
- Weight: Float32(1.2),
- Inner: &InnerMessage{
- Host: String("herpolhode"),
- Connected: Bool(true),
- },
- }
- bData, err := Marshal(b)
- if err != nil {
- t.Fatalf("Marshal(b): %v", err)
- }
- want := &OtherMessage{
- Key: Int64(123),
- Weight: Float32(1.2),
- Inner: &InnerMessage{
- Host: String("herpolhode"),
- Port: Int32(1234),
- Connected: Bool(true),
- },
- }
- got := new(OtherMessage)
- if err := Unmarshal(append(aData, bData...), got); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if !Equal(got, want) {
- t.Errorf("\n got %v\nwant %v", got, want)
- }
-}
-
-func TestEncodingSizes(t *testing.T) {
- tests := []struct {
- m Message
- n int
- }{
- {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
- {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
- {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
- {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
- }
- for _, test := range tests {
- b, err := Marshal(test.m)
- if err != nil {
- t.Errorf("Marshal(%v): %v", test.m, err)
- continue
- }
- if len(b) != test.n {
- t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
- }
- }
-}
-
-func TestRequiredNotSetError(t *testing.T) {
- pb := initGoTest(false)
- pb.RequiredField.Label = nil
- pb.F_Int32Required = nil
- pb.F_Int64Required = nil
-
- expected := "0807" + // field 1, encoding 0, value 7
- "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
- "5001" + // field 10, encoding 0, value 1
- "6d20000000" + // field 13, encoding 5, value 0x20
- "714000000000000000" + // field 14, encoding 1, value 0x40
- "78a019" + // field 15, encoding 0, value 0xca0 = 3232
- "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
- "8d0100004a45" + // field 17, encoding 5, value 3232.0
- "9101000000000040b940" + // field 18, encoding 1, value 6464.0
- "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
- "b304" + // field 70, encoding 3, start group
- "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
- "b404" + // field 70, encoding 4, end group
- "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
- "b0063f" + // field 102, encoding 0, 0x3f zigzag32
- "b8067f" // field 103, encoding 0, 0x7f zigzag64
-
- o := old()
- bytes, err := Marshal(pb)
- if _, ok := err.(*RequiredNotSetError); !ok {
- fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("expected = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.Label") < 0 {
- t.Errorf("marshal-1 wrong err msg: %v", err)
- }
- if !equal(bytes, expected, t) {
- o.DebugPrint("neq 1", bytes)
- t.Fatalf("expected = %s", expected)
- }
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- err = Unmarshal(bytes, pbd)
- if _, ok := err.(*RequiredNotSetError); !ok {
- t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("string = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
- t.Errorf("unmarshal wrong err msg: %v", err)
- }
- bytes, err = Marshal(pbd)
- if _, ok := err.(*RequiredNotSetError); !ok {
- t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("string = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.Label") < 0 {
- t.Errorf("marshal-2 wrong err msg: %v", err)
- }
- if !equal(bytes, expected, t) {
- o.DebugPrint("neq 2", bytes)
- t.Fatalf("string = %s", expected)
- }
-}
-
-func fuzzUnmarshal(t *testing.T, data []byte) {
- defer func() {
- if e := recover(); e != nil {
- t.Errorf("These bytes caused a panic: %+v", data)
- t.Logf("Stack:\n%s", debug.Stack())
- t.FailNow()
- }
- }()
-
- pb := new(MyMessage)
- Unmarshal(data, pb)
-}
-
-func TestMapFieldMarshal(t *testing.T) {
- m := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Rob",
- 4: "Ian",
- 8: "Dave",
- },
- }
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- // b should be the concatenation of these three byte sequences in some order.
- parts := []string{
- "\n\a\b\x01\x12\x03Rob",
- "\n\a\b\x04\x12\x03Ian",
- "\n\b\b\x08\x12\x04Dave",
- }
- ok := false
- for i := range parts {
- for j := range parts {
- if j == i {
- continue
- }
- for k := range parts {
- if k == i || k == j {
- continue
- }
- try := parts[i] + parts[j] + parts[k]
- if bytes.Equal(b, []byte(try)) {
- ok = true
- break
- }
- }
- }
- }
- if !ok {
- t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
- }
- t.Logf("FYI b: %q", b)
-
- (new(Buffer)).DebugPrint("Dump of b", b)
-}
-
-func TestMapFieldRoundTrips(t *testing.T) {
- m := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Rob",
- 4: "Ian",
- 8: "Dave",
- },
- MsgMapping: map[int64]*FloatingPoint{
- 0x7001: &FloatingPoint{F: Float64(2.0)},
- },
- ByteMapping: map[bool][]byte{
- false: []byte("that's not right!"),
- true: []byte("aye, 'tis true!"),
- },
- }
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- t.Logf("FYI b: %q", b)
- m2 := new(MessageWithMap)
- if err := Unmarshal(b, m2); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- for _, pair := range [][2]interface{}{
- {m.NameMapping, m2.NameMapping},
- {m.MsgMapping, m2.MsgMapping},
- {m.ByteMapping, m2.ByteMapping},
- } {
- if !reflect.DeepEqual(pair[0], pair[1]) {
- t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
- }
- }
-}
-
-// Benchmarks
-
-func testMsg() *GoTest {
- pb := initGoTest(true)
- const N = 1000 // Internally the library starts much smaller.
- pb.F_Int32Repeated = make([]int32, N)
- pb.F_DoubleRepeated = make([]float64, N)
- for i := 0; i < N; i++ {
- pb.F_Int32Repeated[i] = int32(i)
- pb.F_DoubleRepeated[i] = float64(i)
- }
- return pb
-}
-
-func bytesMsg() *GoTest {
- pb := initGoTest(true)
- buf := make([]byte, 4000)
- for i := range buf {
- buf[i] = byte(i)
- }
- pb.F_BytesDefaulted = buf
- return pb
-}
-
-func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
- d, _ := marshal(pb)
- b.SetBytes(int64(len(d)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- marshal(pb)
- }
-}
-
-func benchmarkBufferMarshal(b *testing.B, pb Message) {
- p := NewBuffer(nil)
- benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
- p.Reset()
- err := p.Marshal(pb0)
- return p.Bytes(), err
- })
-}
-
-func benchmarkSize(b *testing.B, pb Message) {
- benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
- Size(pb)
- return nil, nil
- })
-}
-
-func newOf(pb Message) Message {
- in := reflect.ValueOf(pb)
- if in.IsNil() {
- return pb
- }
- return reflect.New(in.Type().Elem()).Interface().(Message)
-}
-
-func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
- d, _ := Marshal(pb)
- b.SetBytes(int64(len(d)))
- pbd := newOf(pb)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- unmarshal(d, pbd)
- }
-}
-
-func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
- p := NewBuffer(nil)
- benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
- p.SetBuf(d)
- return p.Unmarshal(pb0)
- })
-}
-
-// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
-
-func BenchmarkMarshal(b *testing.B) {
- benchmarkMarshal(b, testMsg(), Marshal)
-}
-
-func BenchmarkBufferMarshal(b *testing.B) {
- benchmarkBufferMarshal(b, testMsg())
-}
-
-func BenchmarkSize(b *testing.B) {
- benchmarkSize(b, testMsg())
-}
-
-func BenchmarkUnmarshal(b *testing.B) {
- benchmarkUnmarshal(b, testMsg(), Unmarshal)
-}
-
-func BenchmarkBufferUnmarshal(b *testing.B) {
- benchmarkBufferUnmarshal(b, testMsg())
-}
-
-func BenchmarkMarshalBytes(b *testing.B) {
- benchmarkMarshal(b, bytesMsg(), Marshal)
-}
-
-func BenchmarkBufferMarshalBytes(b *testing.B) {
- benchmarkBufferMarshal(b, bytesMsg())
-}
-
-func BenchmarkSizeBytes(b *testing.B) {
- benchmarkSize(b, bytesMsg())
-}
-
-func BenchmarkUnmarshalBytes(b *testing.B) {
- benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
-}
-
-func BenchmarkBufferUnmarshalBytes(b *testing.B) {
- benchmarkBufferUnmarshal(b, bytesMsg())
-}
-
-func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
- b.StopTimer()
- pb := initGoTestField()
- skip := &GoSkipTest{
- SkipInt32: Int32(32),
- SkipFixed32: Uint32(3232),
- SkipFixed64: Uint64(6464),
- SkipString: String("skipper"),
- Skipgroup: &GoSkipTest_SkipGroup{
- GroupInt32: Int32(75),
- GroupString: String("wxyz"),
- },
- }
-
- pbd := new(GoTestField)
- p := NewBuffer(nil)
- p.Marshal(pb)
- p.Marshal(skip)
- p2 := NewBuffer(nil)
-
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- p2.SetBuf(p.Bytes())
- p2.Unmarshal(pbd)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
index 6c6a7d95f2..84bbaf4335 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
@@ -75,12 +75,13 @@ func Merge(dst, src Message) {
}
func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
- mergeAny(out.Field(i), in.Field(i))
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
@@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) {
}
}
-func mergeAny(out, in reflect.Value) {
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
@@ -112,7 +116,21 @@ func mergeAny(out, in reflect.Value) {
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
@@ -127,7 +145,7 @@ func mergeAny(out, in reflect.Value) {
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key))
+ mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@@ -143,13 +161,21 @@ func mergeAny(out, in reflect.Value) {
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
- mergeAny(out.Elem(), in.Elem())
+ mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
@@ -167,7 +193,7 @@ func mergeAny(out, in reflect.Value) {
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i))
+ mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
@@ -184,7 +210,7 @@ func mergeExtension(out, in map[int32]Extension) {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value))
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
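
(Reviewer note on the clone.go hunk above: mergeAny now distinguishes a scalar reached through a pointer — proto2 "optional" semantics, where an explicit zero is meaningful — from a bare proto3 scalar, whose zero value means "unset" and must not clobber the destination. The sketch below is only an illustration of that rule, not the vendored library code; Msg, merge and int32Ptr are made-up names standing in for generated message types.)

package main

import "fmt"

// Msg stands in for a generated message. Port is proto2-style ("optional",
// represented as *int32, so 0 can be set explicitly); Count is proto3-style
// (a plain int32, where 0 is indistinguishable from "unset").
type Msg struct {
    Port  *int32 // proto2-style: merged whenever src.Port != nil, even if *src.Port == 0
    Count int32  // proto3-style: merged only when the value is non-zero
}

// merge applies the same rule the patched mergeAny encodes:
// values reached via a pointer always win; bare proto3 zero values are skipped.
func merge(dst, src *Msg) {
    if src.Port != nil { // viaPtr == true: an explicit 0 still overwrites dst
        v := *src.Port
        dst.Port = &v
    }
    if src.Count != 0 { // viaPtr == false: the proto3 zero value is treated as "unset"
        dst.Count = src.Count
    }
}

func int32Ptr(v int32) *int32 { return &v }

func main() {
    zero := int32(0)
    dst := &Msg{Port: int32Ptr(4000), Count: 7}
    src := &Msg{Port: &zero, Count: 0}

    merge(dst, src)
    // Port was set through a pointer, so the explicit 0 replaces 4000;
    // Count is a bare zero, so dst keeps 7.
    fmt.Println(*dst.Port, dst.Count) // 0 7
}

Running this prints "0 7": the pointer-backed field is overwritten even by an explicit zero, while the bare zero Count leaves the destination intact — the same distinction the viaPtr/isProto3Zero check introduces in mergeAny.
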
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
deleted file mode 100644
index 9db0fb65cf..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- "github.com/golang/protobuf/proto"
-
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-var cloneTestMessage = &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &pb.InnerMessage{
- Host: proto.String("niles"),
- Port: proto.Int32(9099),
- Connected: proto.Bool(true),
- },
- Others: []*pb.OtherMessage{
- {
- Value: []byte("some bytes"),
- },
- },
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
-}
-
-func init() {
- ext := &pb.Ext{
- Data: proto.String("extension"),
- }
- if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
- panic("SetExtension: " + err.Error())
- }
-}
-
-func TestClone(t *testing.T) {
- m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
- if !proto.Equal(m, cloneTestMessage) {
- t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
- }
-
- // Verify it was a deep copy.
- *m.Inner.Port++
- if proto.Equal(m, cloneTestMessage) {
- t.Error("Mutating clone changed the original")
- }
- // Byte fields and repeated fields should be copied.
- if &m.Pet[0] == &cloneTestMessage.Pet[0] {
- t.Error("Pet: repeated field not copied")
- }
- if &m.Others[0] == &cloneTestMessage.Others[0] {
- t.Error("Others: repeated field not copied")
- }
- if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
- t.Error("Others[0].Value: bytes field not copied")
- }
- if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
- t.Error("RepBytes: repeated field not copied")
- }
- if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
- t.Error("RepBytes[0]: bytes field not copied")
- }
-}
-
-func TestCloneNil(t *testing.T) {
- var m *pb.MyMessage
- if c := proto.Clone(m); !proto.Equal(m, c) {
- t.Errorf("Clone(%v) = %v", m, c)
- }
-}
-
-var mergeTests = []struct {
- src, dst, want proto.Message
-}{
- {
- src: &pb.MyMessage{
- Count: proto.Int32(42),
- },
- dst: &pb.MyMessage{
- Name: proto.String("Dave"),
- },
- want: &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- },
- },
- {
- src: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("hey"),
- Connected: proto.Bool(true),
- },
- Pet: []string{"horsey"},
- Others: []*pb.OtherMessage{
- {
- Value: []byte("some bytes"),
- },
- },
- },
- dst: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("niles"),
- Port: proto.Int32(9099),
- },
- Pet: []string{"bunny", "kitty"},
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(31415926535),
- },
- {
- // Explicitly test a src=nil field
- Inner: nil,
- },
- },
- },
- want: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("hey"),
- Connected: proto.Bool(true),
- Port: proto.Int32(9099),
- },
- Pet: []string{"bunny", "kitty", "horsey"},
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(31415926535),
- },
- {},
- {
- Value: []byte("some bytes"),
- },
- },
- },
- },
- {
- src: &pb.MyMessage{
- RepBytes: [][]byte{[]byte("wow")},
- },
- dst: &pb.MyMessage{
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham")},
- },
- want: &pb.MyMessage{
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
- },
- },
- // Check that a scalar bytes field replaces rather than appends.
- {
- src: &pb.OtherMessage{Value: []byte("foo")},
- dst: &pb.OtherMessage{Value: []byte("bar")},
- want: &pb.OtherMessage{Value: []byte("foo")},
- },
- {
- src: &pb.MessageWithMap{
- NameMapping: map[int32]string{6: "Nigel"},
- MsgMapping: map[int64]*pb.FloatingPoint{
- 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
- },
- ByteMapping: map[bool][]byte{true: []byte("wowsa")},
- },
- dst: &pb.MessageWithMap{
- NameMapping: map[int32]string{
- 6: "Bruce", // should be overwritten
- 7: "Andrew",
- },
- },
- want: &pb.MessageWithMap{
- NameMapping: map[int32]string{
- 6: "Nigel",
- 7: "Andrew",
- },
- MsgMapping: map[int64]*pb.FloatingPoint{
- 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
- },
- ByteMapping: map[bool][]byte{true: []byte("wowsa")},
- },
- },
-}
-
-func TestMerge(t *testing.T) {
- for _, m := range mergeTests {
- got := proto.Clone(m.dst)
- proto.Merge(got, m.src)
- if !proto.Equal(got, m.want) {
- t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
index 129792ed12..5810782fd8 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
@@ -46,6 +46,10 @@ import (
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
@@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error {
return NewBuffer(buf).Unmarshal(pb)
}
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
@@ -377,6 +399,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
continue
}
}
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
@@ -561,9 +597,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error
return err
}
nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
y := *v
- for i := 0; i < nb; i++ {
+ for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
@@ -675,7 +715,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
- mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
@@ -727,8 +767,14 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
- v.SetMapIndex(keyptr.Elem(), valptr.Elem())
+ v.SetMapIndex(keyelem, valelem)
return nil
}
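
Reviewer note: besides the oneof unmarshaling hook and the packed-bool overflow guard, decode.go gains a count-delimited reader that pairs with the EncodeMessage writer added to encode.go below. A small round-trip sketch using the proto/testdata messages referenced elsewhere in this patch:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        buf := proto.NewBuffer(nil)

        // EncodeMessage writes a varint length prefix followed by the body.
        if err := buf.EncodeMessage(&pb.MyMessage{Count: proto.Int32(42)}); err != nil {
            log.Fatal(err)
        }

        // DecodeMessage reads one count-delimited message back out.
        out := &pb.MyMessage{}
        if err := proto.NewBuffer(buf.Bytes()).DecodeMessage(out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.GetCount()) // 42
    }
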
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
index d1abc3328d..89d0caa826 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
@@ -228,6 +228,20 @@ func Marshal(pb Message) ([]byte, error) {
return p.buf, err
}
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
// Marshal takes the protocol buffer
// and encodes it into the wire format, writing the result to the
// Buffer.
@@ -318,7 +332,7 @@ func size_bool(p *Properties, base structPointer) int {
func size_proto3_bool(p *Properties, base structPointer) int {
v := *structPointer_BoolVal(base, p.field)
- if !v {
+ if !v && !p.oneof {
return 0
}
return len(p.tagcode) + 1 // each bool takes exactly one byte
@@ -361,7 +375,7 @@ func size_int32(p *Properties, base structPointer) (n int) {
func size_proto3_int32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
+ if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -407,7 +421,7 @@ func size_uint32(p *Properties, base structPointer) (n int) {
func size_proto3_uint32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := word32Val_Get(v)
- if x == 0 {
+ if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -452,7 +466,7 @@ func size_int64(p *Properties, base structPointer) (n int) {
func size_proto3_int64(p *Properties, base structPointer) (n int) {
v := structPointer_Word64Val(base, p.field)
x := word64Val_Get(v)
- if x == 0 {
+ if x == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -495,7 +509,7 @@ func size_string(p *Properties, base structPointer) (n int) {
func size_proto3_string(p *Properties, base structPointer) (n int) {
v := *structPointer_StringVal(base, p.field)
- if v == "" {
+ if v == "" && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -529,7 +543,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
- return nil
+ return state.err
}
o.buf = append(o.buf, p.tagcode...)
@@ -667,7 +681,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error
func size_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
- if s == nil {
+ if s == nil && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -677,7 +691,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) {
func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
+ if len(s) == 0 && !p.oneof {
return 0
}
n += len(p.tagcode)
@@ -1084,7 +1098,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
repeated MapFieldEntry map_field = N;
*/
- v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
if v.Len() == 0 {
return nil
}
@@ -1106,6 +1120,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
for _, key := range keys {
val := v.MapIndex(key)
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
keycopy.Set(key)
valcopy.Set(val)
@@ -1118,7 +1137,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
}
func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
@@ -1196,6 +1215,14 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
}
}
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
// Add unrecognized fields at the end.
if prop.unrecField.IsValid() {
v := *structPointer_Bytes(base, prop.unrecField)
@@ -1221,6 +1248,27 @@ func size_struct(prop *StructProperties, base structPointer) (n int) {
n += len(v)
}
+ // Factor in any oneof fields.
+ // TODO: This could be faster and use less reflection.
+ if prop.oneofMarshaler != nil {
+ sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
+ for i := 0; i < prop.stype.NumField(); i++ {
+ fv := sv.Field(i)
+ if fv.Kind() != reflect.Interface || fv.IsNil() {
+ continue
+ }
+ if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
+ continue
+ }
+ spv := fv.Elem() // interface -> *T
+ sv := spv.Elem() // *T -> T
+ sf := sv.Type().Field(0) // StructField inside T
+ var prop Properties
+ prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
+ n += prop.size(&prop, toStructPointer(spv))
+ }
+ }
+
return
}
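
Reviewer note: enc_new_map above now rejects nil message pointers as map values instead of emitting a corrupt entry, and the proto3 size_* helpers stop treating zero values as absent when the field lives inside a oneof. A sketch of the new map check, again using proto/testdata:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        m := &pb.MessageWithMap{
            MsgMapping: map[int64]*pb.FloatingPoint{1: nil}, // nil message value
        }
        if _, err := proto.Marshal(m); err != nil {
            fmt.Println(err) // proto: map has nil element
        }
    }
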
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
index d8673a3e97..5475c3d959 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
@@ -154,6 +154,17 @@ func equalAny(v1, v2 reflect.Value) bool {
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
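
Reviewer note: the new reflect.Interface case makes Equal aware of oneof wrappers: two messages compare equal only if the same wrapper type is set and the inner values match. Sketch, using the same hypothetical example.pb package as in the earlier note:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package, see note above
    )

    func main() {
        a := &pb.Test{Label: proto.String("x"), Union: &pb.Test_Number{6}}
        b := &pb.Test{Label: proto.String("x"), Union: &pb.Test_Number{6}}
        c := &pb.Test{Label: proto.String("x"), Union: &pb.Test_Name{"six"}}

        fmt.Println(proto.Equal(a, b)) // true: same wrapper type, equal inner value
        fmt.Println(proto.Equal(a, c)) // false: different wrapper types
    }
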
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
deleted file mode 100644
index b322f65ab6..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- . "github.com/golang/protobuf/proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-// Four identical base messages.
-// The init function adds extensions to some of them.
-var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
-var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
-var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
-var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
-
-// Two messages with non-message extensions.
-var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
-var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
-
-func init() {
- ext1 := &pb.Ext{Data: String("Kirk")}
- ext2 := &pb.Ext{Data: String("Picard")}
-
- // messageWithExtension1a has ext1, but never marshals it.
- if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
- panic("SetExtension on 1a failed: " + err.Error())
- }
-
- // messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
- if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
- panic("SetExtension on 1b failed: " + err.Error())
- }
- buf, err := Marshal(messageWithExtension1b)
- if err != nil {
- panic("Marshal of 1b failed: " + err.Error())
- }
- messageWithExtension1b.Reset()
- if err := Unmarshal(buf, messageWithExtension1b); err != nil {
- panic("Unmarshal of 1b failed: " + err.Error())
- }
-
- // messageWithExtension2 has ext2.
- if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
- panic("SetExtension on 2 failed: " + err.Error())
- }
-
- if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
- panic("SetExtension on Int32-1 failed: " + err.Error())
- }
- if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
- panic("SetExtension on Int32-2 failed: " + err.Error())
- }
-}
-
-var EqualTests = []struct {
- desc string
- a, b Message
- exp bool
-}{
- {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
- {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
- {"nil vs nil", nil, nil, true},
- {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
- {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
- {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
-
- {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
- {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
- {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
- {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
-
- {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
- {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
- {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
- {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
- {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
- {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
- {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
-
- {
- "nested, different",
- &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
- &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
- false,
- },
- {
- "nested, equal",
- &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
- &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
- true,
- },
-
- {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
- {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
- {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
- {
- "repeated bytes",
- &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
- &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
- true,
- },
-
- {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
- {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
- {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
-
- {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
- {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
-
- {
- "message with group",
- &pb.MyMessage{
- Count: Int32(1),
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: Int32(5),
- },
- },
- &pb.MyMessage{
- Count: Int32(1),
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: Int32(5),
- },
- },
- true,
- },
-
- {
- "map same",
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
- true,
- },
- {
- "map different entry",
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
- &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
- false,
- },
- {
- "map different key only",
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
- &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
- false,
- },
- {
- "map different value only",
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
- &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
- false,
- },
-}
-
-func TestEqual(t *testing.T) {
- for _, tc := range EqualTests {
- if res := Equal(tc.a, tc.b); res != tc.exp {
- t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
index 5f62dff247..e591ccef79 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
@@ -222,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
}
// GetExtension parses and returns the given extension of pb.
-// If the extension is not present it returns ErrMissingExtension.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
@@ -231,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
- return nil, ErrMissingExtension
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
}
+
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
@@ -258,6 +261,41 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
return e.value, nil
}
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
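
Reviewer note: GetExtension now falls back to defaultExtensionValue when the extension is absent, so only extensions without a declared default still yield ErrMissingExtension. A sketch of the new behaviour; the extension E_Weight, its [default = 42] declaration, and the assumption that Test is declared extendable are all invented for illustration and are not part of proto/testdata:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical package assumed to declare:
        //   extend Test { optional int32 weight = 100 [default = 42]; }
    )

    func main() {
        msg := &pb.Test{Label: proto.String("hello")}

        // The extension is unset, but it declares a default, so GetExtension
        // now returns a *int32 holding 42 instead of ErrMissingExtension.
        v, err := proto.GetExtension(msg, pb.E_Weight)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(*v.(*int32)) // 42
    }
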
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
deleted file mode 100644
index 92d13341d6..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- "github.com/golang/protobuf/proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-func TestGetExtensionsWithMissingExtensions(t *testing.T) {
- msg := &pb.MyMessage{}
- ext1 := &pb.Ext{}
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
- t.Fatalf("Could not set ext1: %s", ext1)
- }
- exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
- pb.E_Ext_More,
- pb.E_Ext_Text,
- })
- if err != nil {
- t.Fatalf("GetExtensions() failed: %s", err)
- }
- if exts[0] != ext1 {
- t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
- }
- if exts[1] != nil {
- t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
- }
-}
-
-func TestGetExtensionStability(t *testing.T) {
- check := func(m *pb.MyMessage) bool {
- ext1, err := proto.GetExtension(m, pb.E_Ext_More)
- if err != nil {
- t.Fatalf("GetExtension() failed: %s", err)
- }
- ext2, err := proto.GetExtension(m, pb.E_Ext_More)
- if err != nil {
- t.Fatalf("GetExtension() failed: %s", err)
- }
- return ext1 == ext2
- }
- msg := &pb.MyMessage{Count: proto.Int32(4)}
- ext0 := &pb.Ext{}
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
- t.Fatalf("Could not set ext1: %s", ext0)
- }
- if !check(msg) {
- t.Errorf("GetExtension() not stable before marshaling")
- }
- bb, err := proto.Marshal(msg)
- if err != nil {
- t.Fatalf("Marshal() failed: %s", err)
- }
- msg1 := &pb.MyMessage{}
- err = proto.Unmarshal(bb, msg1)
- if err != nil {
- t.Fatalf("Unmarshal() failed: %s", err)
- }
- if !check(msg1) {
- t.Errorf("GetExtension() not stable after unmarshaling")
- }
-}
-
-func TestExtensionsRoundTrip(t *testing.T) {
- msg := &pb.MyMessage{}
- ext1 := &pb.Ext{
- Data: proto.String("hi"),
- }
- ext2 := &pb.Ext{
- Data: proto.String("there"),
- }
- exists := proto.HasExtension(msg, pb.E_Ext_More)
- if exists {
- t.Error("Extension More present unexpectedly")
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
- t.Error(err)
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
- t.Error(err)
- }
- e, err := proto.GetExtension(msg, pb.E_Ext_More)
- if err != nil {
- t.Error(err)
- }
- x, ok := e.(*pb.Ext)
- if !ok {
- t.Errorf("e has type %T, expected testdata.Ext", e)
- } else if *x.Data != "there" {
- t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
- }
- proto.ClearExtension(msg, pb.E_Ext_More)
- if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
- t.Errorf("got %v, expected ErrMissingExtension", e)
- }
- if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
- t.Error("expected bad extension error, got nil")
- }
- if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
- t.Error("expected extension err")
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
- t.Error("expected some sort of type mismatch error, got nil")
- }
-}
-
-func TestNilExtension(t *testing.T) {
- msg := &pb.MyMessage{
- Count: proto.Int32(1),
- }
- if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
- t.Fatal(err)
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
- t.Error("expected SetExtension to fail due to a nil extension")
- } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
- t.Errorf("expected error %v, got %v", want, err)
- }
- // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
- // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
index fb139f6b74..abbf7583ed 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
@@ -30,179 +30,230 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
- Package proto converts data structures to and from the wire format of
- protocol buffers. It works in concert with the Go source code generated
- for .proto files by the protocol compiler.
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
- A summary of the properties of the protocol buffer interface
- for a protocol buffer variable v:
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and a Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and a Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
- The simplest way to describe this is to see an example.
- Given file test.proto, containing
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
- package example;
+ package example;
- enum FOO { X = 17; }
+ enum FOO { X = 17; }
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
}
+ *x = FOO(value)
+ return nil
+ }
- The resulting file, test.pb.go, is:
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
- package example
+ type isTest_Union interface {
+ isTest_Union()
+ }
- import proto "github.com/golang/protobuf/proto"
- import math "math"
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
}
- var FOO_value = map[string]int32{
- "X": 17,
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
}
+ return ""
+ }
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
}
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
}
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
}
+ return ""
+ }
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
}
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
- const Default_Test_Type int32 = 77
+ return 0
+ }
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
}
+ return ""
+ }
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
}
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
}
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
}
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
- To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // etc.
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
}
+ // etc.
+ }
*/
package proto
@@ -211,6 +262,7 @@ import (
"fmt"
"log"
"reflect"
+ "sort"
"strconv"
"sync"
)
@@ -385,13 +437,13 @@ func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32,
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
-func (o *Buffer) DebugPrint(s string, b []byte) {
+func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
- obuf := o.buf
- index := o.index
- o.buf = b
- o.index = 0
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
@@ -402,12 +454,12 @@ out:
fmt.Print(" ")
}
- index := o.index
- if index == len(o.buf) {
+ index := p.index
+ if index == len(p.buf) {
break
}
- op, err := o.DecodeVarint()
+ op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
@@ -424,7 +476,7 @@ out:
case WireBytes:
var r []byte
- r, err = o.DecodeRawBytes(false)
+ r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
@@ -445,7 +497,7 @@ out:
fmt.Printf("\n")
case WireFixed32:
- u, err = o.DecodeFixed32()
+ u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
@@ -453,16 +505,15 @@ out:
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
- u, err = o.DecodeFixed64()
+ u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
- break
case WireVarint:
- u, err = o.DecodeVarint()
+ u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
@@ -470,30 +521,22 @@ out:
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
- if err != nil {
- fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
- break out
- }
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
- if err != nil {
- fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
- break out
- }
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
- o.buf = obuf
- o.index = index
+ p.buf = obuf
+ p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
@@ -668,123 +711,173 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
}
ft := t.Field(fi).Type
- var canHaveDefault, nestedMessage bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
}
-
- if !canHaveDefault {
- if nestedMessage {
- dm.nested = append(dm.nested, fi)
- }
- continue
- }
-
- sf := scalarField{
- index: fi,
- kind: ft.Elem().Kind(),
- }
-
- // scalar fields without defaults
- if !prop.HasDefault {
- dm.scalars = append(dm.scalars, sf)
- continue
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- log.Printf("proto: bad default bool %q: %v", prop.Default, err)
- continue
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
- continue
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
- continue
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
- continue
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
- continue
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
- continue
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
- continue
- }
- sf.value = x
- default:
- log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
- continue
- }
-
- dm.scalars = append(dm.scalars, sf)
}
return dm
}
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
-type mapKeys []reflect.Value
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
-func (s mapKeys) Len() int { return len(s) }
-func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s mapKeys) Less(i, j int) bool {
- return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
}
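
Reviewer note: mapKeys is now a constructor for a sort.Interface with per-kind specialization, so integer map keys sort numerically rather than by their string form; the package's text writer sorts map keys with this helper, which keeps output such as the following deterministic. Sketch with proto/testdata:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        m := &pb.MessageWithMap{
            NameMapping: map[int32]string{10: "ten", 2: "two", 7: "seven"},
        }
        // Entries are emitted for keys 2, 7, 10 (numeric order),
        // not "10", "2", "7" (string order).
        fmt.Print(proto.MarshalTextString(m))
    }
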
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
deleted file mode 100644
index 7c29bccf4b..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "bytes"
- "testing"
-)
-
-func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
- // Check that a repeated message set entry will be concatenated.
- in := &MessageSet{
- Item: []*_MessageSet_Item{
- {TypeId: Int32(12345), Message: []byte("hoo")},
- {TypeId: Int32(12345), Message: []byte("hah")},
- },
- }
- b, err := Marshal(in)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- t.Logf("Marshaled bytes: %q", b)
-
- m := make(map[int32]Extension)
- if err := UnmarshalMessageSet(b, m); err != nil {
- t.Fatalf("UnmarshalMessageSet: %v", err)
- }
- ext, ok := m[12345]
- if !ok {
- t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
- }
- // Skip wire type/field number and length varints.
- got := skipVarint(skipVarint(ext.enc))
- if want := []byte("hoohah"); !bytes.Equal(got, want) {
- t.Errorf("Combined extension is %q, want %q", got, want)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
index c68b12525c..749919d250 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
-// Map returns the reflect.Value for the address of a map field in the struct.
-func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
index 48bc0fa482..e9be0fe92e 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
-// Map returns the reflect.Value for the address of a map field in the struct.
-func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
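
The two hunks above rename structPointer_Map to structPointer_NewAt, since the helper now hands back a pointer reflect.Value for an arbitrary struct field (needed by the oneof support added below) rather than only map fields. A minimal, standalone sketch of the reflect.NewAt idiom these helpers rely on; the point type and field here are illustrative only, not part of the patch:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type point struct {
	X int32
	Y int32
}

func main() {
	p := point{X: 1, Y: 2}

	// Compute the address of the Y field, the same way the unsafe
	// structPointer helpers combine a base pointer with a field offset.
	base := unsafe.Pointer(&p)
	off := unsafe.Offsetof(p.Y)

	// reflect.NewAt returns a *int32 reflect.Value pointing at that field,
	// so the caller can read or write it through the reflect API.
	v := reflect.NewAt(reflect.TypeOf(int32(0)), unsafe.Pointer(uintptr(base)+off))
	v.Elem().SetInt(42)

	fmt.Println(p.Y) // prints 42
}
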
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
index d74844ab2a..bbca4daf05 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
@@ -84,6 +84,12 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@@ -132,6 +138,21 @@ type StructProperties struct {
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
@@ -156,6 +177,7 @@ type Properties struct {
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
@@ -208,6 +230,9 @@ func (p *Properties) String() string {
if p.proto3 {
s += ",proto3"
}
+ if p.oneof {
+ s += ",oneof"
+ }
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
@@ -284,6 +309,8 @@ func (p *Properties) Parse(s string) {
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
@@ -665,6 +692,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
@@ -674,7 +702,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
@@ -682,6 +710,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
// build required counts
// build tags
reqCount := 0
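
The properties.go hunk teaches GetProperties about oneof fields: a generated message exposes XXX_OneofFuncs, and each wrapper type (*T) it returns is matched to the single interface field of the message it is assignable to. A rough, self-contained sketch of that assignability search, using hypothetical hand-written types in place of generated protobuf code:

package main

import (
	"fmt"
	"reflect"
)

// isMsg_Union stands in for the interface type a generated oneof field uses.
type isMsg_Union interface{ isMsg_Union() }

// Msg mimics a generated message with one oneof container field.
type Msg struct {
	Name  string
	Union isMsg_Union
}

// Msg_Number is one generated wrapper type for the oneof.
type Msg_Number struct{ Number int32 }

func (*Msg_Number) isMsg_Union() {}

func main() {
	t := reflect.TypeOf(Msg{})
	wrapper := reflect.TypeOf((*Msg_Number)(nil)) // *Msg_Number, as XXX_OneofFuncs would report it

	// Find the single interface field the wrapper type is assignable to,
	// mirroring the loop added in getPropertiesLocked.
	field := -1
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Type.Kind() != reflect.Interface {
			continue
		}
		if !wrapper.AssignableTo(f.Type) {
			continue
		}
		field = i
		break
	}
	fmt.Println("oneof wrapper maps to struct field", field) // prints 1 (the Union field)
}
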
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
new file mode 100644
index 0000000000..37c7782092
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
@@ -0,0 +1,122 @@
+// Code generated by protoc-gen-go.
+// source: proto3_proto/proto3.proto
+// DO NOT EDIT!
+
+/*
+Package proto3_proto is a generated protocol buffer package.
+
+It is generated from these files:
+ proto3_proto/proto3.proto
+
+It has these top-level messages:
+ Message
+ Nested
+ MessageWithMap
+*/
+package proto3_proto
+
+import proto "github.com/golang/protobuf/proto"
+import testdata "github.com/golang/protobuf/proto/testdata"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+type Message_Humour int32
+
+const (
+ Message_UNKNOWN Message_Humour = 0
+ Message_PUNS Message_Humour = 1
+ Message_SLAPSTICK Message_Humour = 2
+ Message_BILL_BAILEY Message_Humour = 3
+)
+
+var Message_Humour_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "PUNS",
+ 2: "SLAPSTICK",
+ 3: "BILL_BAILEY",
+}
+var Message_Humour_value = map[string]int32{
+ "UNKNOWN": 0,
+ "PUNS": 1,
+ "SLAPSTICK": 2,
+ "BILL_BAILEY": 3,
+}
+
+func (x Message_Humour) String() string {
+ return proto.EnumName(Message_Humour_name, int32(x))
+}
+
+type Message struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
+ HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+ ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
+ TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
+ Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
+ Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
+ Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
+ Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
+ Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+
+func (m *Message) GetNested() *Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *Message) GetTerrain() map[string]*Nested {
+ if m != nil {
+ return m.Terrain
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Field() *testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Field
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Value
+ }
+ return nil
+}
+
+type Nested struct {
+ Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
+}
+
+func (m *Nested) Reset() { *m = Nested{} }
+func (m *Nested) String() string { return proto.CompactTextString(m) }
+func (*Nested) ProtoMessage() {}
+
+type MessageWithMap struct {
+ ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
+}
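
The generated proto3_proto package added above follows proto3 semantics: scalar fields are plain values rather than pointers, and zero values are simply not written to the wire (the deleted proto3_test.go below exercised exactly this). A short usage sketch against the vendored import path, for illustration only:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	// A zero proto3 message encodes to zero bytes.
	empty, err := proto.Marshal(&pb.Message{})
	if err != nil {
		panic(err)
	}

	// Once fields are set, they appear on the wire.
	set, err := proto.Marshal(&pb.Message{Name: "David", HeightInCm: 178})
	if err != nil {
		panic(err)
	}

	fmt.Println(len(empty), len(set)) // 0 for the zero message, >0 for the populated one
}
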
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
deleted file mode 100644
index 462f8055c3..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- "github.com/golang/protobuf/proto"
- pb "github.com/golang/protobuf/proto/proto3_proto"
- tpb "github.com/golang/protobuf/proto/testdata"
-)
-
-func TestProto3ZeroValues(t *testing.T) {
- tests := []struct {
- desc string
- m proto.Message
- }{
- {"zero message", &pb.Message{}},
- {"empty bytes field", &pb.Message{Data: []byte{}}},
- }
- for _, test := range tests {
- b, err := proto.Marshal(test.m)
- if err != nil {
- t.Errorf("%s: proto.Marshal: %v", test.desc, err)
- continue
- }
- if len(b) > 0 {
- t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
- }
- }
-}
-
-func TestRoundTripProto3(t *testing.T) {
- m := &pb.Message{
- Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
- Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
- HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
- Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
- ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
- TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
- Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
-
- Key: []uint64{1, 0xdeadbeef},
- Nested: &pb.Nested{
- Bunny: "Monty",
- },
- }
- t.Logf(" m: %v", m)
-
- b, err := proto.Marshal(m)
- if err != nil {
- t.Fatalf("proto.Marshal: %v", err)
- }
- t.Logf(" b: %q", b)
-
- m2 := new(pb.Message)
- if err := proto.Unmarshal(b, m2); err != nil {
- t.Fatalf("proto.Unmarshal: %v", err)
- }
- t.Logf("m2: %v", m2)
-
- if !proto.Equal(m, m2) {
- t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
- }
-}
-
-func TestProto3SetDefaults(t *testing.T) {
- in := &pb.Message{
- Terrain: map[string]*pb.Nested{
- "meadow": new(pb.Nested),
- },
- Proto2Field: new(tpb.SubDefaults),
- Proto2Value: map[string]*tpb.SubDefaults{
- "badlands": new(tpb.SubDefaults),
- },
- }
-
- got := proto.Clone(in).(*pb.Message)
- proto.SetDefaults(got)
-
- // There are no defaults in proto3. Everything should be the zero value, but
- // we need to remember to set defaults for nested proto2 messages.
- want := &pb.Message{
- Terrain: map[string]*pb.Nested{
- "meadow": new(pb.Nested),
- },
- Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
- Proto2Value: map[string]*tpb.SubDefaults{
- "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
- },
- }
-
- if !proto.Equal(got, want) {
- t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
deleted file mode 100644
index db5614fd1d..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "log"
- "strings"
- "testing"
-
- . "github.com/golang/protobuf/proto"
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
-
-// messageWithExtension2 is in equal_test.go.
-var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
-
-func init() {
- if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
- log.Panicf("SetExtension: %v", err)
- }
- if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
- log.Panicf("SetExtension: %v", err)
- }
-
- // Force messageWithExtension3 to have the extension encoded.
- Marshal(messageWithExtension3)
-
-}
-
-var SizeTests = []struct {
- desc string
- pb Message
-}{
- {"empty", &pb.OtherMessage{}},
- // Basic types.
- {"bool", &pb.Defaults{F_Bool: Bool(true)}},
- {"int32", &pb.Defaults{F_Int32: Int32(12)}},
- {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
- {"small int64", &pb.Defaults{F_Int64: Int64(1)}},
- {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
- {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
- {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
- {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
- {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
- {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
- {"float", &pb.Defaults{F_Float: Float32(12.6)}},
- {"double", &pb.Defaults{F_Double: Float64(13.9)}},
- {"string", &pb.Defaults{F_String: String("niles")}},
- {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
- {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
- {"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
- {"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
- {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
- // Repeated.
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
- {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
- {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
- {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
- {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
- {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
- // Need enough large numbers to verify that the header is counting the number of bytes
- // for the field, not the number of elements.
- 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
- 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
- }}},
- {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
- {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
- // Nested.
- {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
- {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
- // Other things.
- {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
- {"extension (unencoded)", messageWithExtension1},
- {"extension (encoded)", messageWithExtension3},
- // proto3 message
- {"proto3 empty", &proto3pb.Message{}},
- {"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
- {"proto3 int64", &proto3pb.Message{ResultCount: 1}},
- {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
- {"proto3 float", &proto3pb.Message{Score: 12.6}},
- {"proto3 string", &proto3pb.Message{Name: "Snezana"}},
- {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
- {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
- {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
- {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
-
- {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
- {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
- {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
- {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
-
- {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
- {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
- {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
-}
-
-func TestSize(t *testing.T) {
- for _, tc := range SizeTests {
- size := Size(tc.pb)
- b, err := Marshal(tc.pb)
- if err != nil {
- t.Errorf("%v: Marshal failed: %v", tc.desc, err)
- continue
- }
- if size != len(b) {
- t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
- t.Logf("%v: bytes: %#v", tc.desc, b)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
deleted file mode 100644
index fc288628a7..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-include ../../Make.protobuf
-
-all: regenerate
-
-regenerate:
- rm -f test.pb.go
- make test.pb.go
-
-# The following rules are just aids to development. Not needed for typical testing.
-
-diff: regenerate
- git diff test.pb.go
-
-restore:
- cp test.pb.go.golden test.pb.go
-
-preserve:
- cp test.pb.go test.pb.go.golden
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
deleted file mode 100644
index 7172d0e969..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Verify that the compiler output for test.proto is unchanged.
-
-package testdata
-
-import (
- "crypto/sha1"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "testing"
-)
-
-// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
-func sum(t *testing.T, name string) string {
- data, err := ioutil.ReadFile(name)
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("sum(%q): length is %d", name, len(data))
- hash := sha1.New()
- _, err = hash.Write(data)
- if err != nil {
- t.Fatal(err)
- }
- return fmt.Sprintf("% x", hash.Sum(nil))
-}
-
-func run(t *testing.T, name string, args ...string) {
- cmd := exec.Command(name, args...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestGolden(t *testing.T) {
- // Compute the original checksum.
- goldenSum := sum(t, "test.pb.go")
- // Run the proto compiler.
- run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
- newFile := filepath.Join(os.TempDir(), "test.pb.go")
- defer os.Remove(newFile)
- // Compute the new checksum.
- newSum := sum(t, newFile)
- // Verify
- if newSum != goldenSum {
- run(t, "diff", "-u", "test.pb.go", newFile)
- t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
deleted file mode 100644
index 20a57e8af2..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
+++ /dev/null
@@ -1,2397 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: test.proto
-// DO NOT EDIT!
-
-/*
-Package testdata is a generated protocol buffer package.
-
-It is generated from these files:
- test.proto
-
-It has these top-level messages:
- GoEnum
- GoTestField
- GoTest
- GoSkipTest
- NonPackedTest
- PackedTest
- MaxTag
- OldMessage
- NewMessage
- InnerMessage
- OtherMessage
- MyMessage
- Ext
- MyMessageSet
- Empty
- MessageList
- Strings
- Defaults
- SubDefaults
- RepeatedEnum
- MoreRepeated
- GroupOld
- GroupNew
- FloatingPoint
- MessageWithMap
-*/
-package testdata
-
-import proto "github.com/golang/protobuf/proto"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = math.Inf
-
-type FOO int32
-
-const (
- FOO_FOO1 FOO = 1
-)
-
-var FOO_name = map[int32]string{
- 1: "FOO1",
-}
-var FOO_value = map[string]int32{
- "FOO1": 1,
-}
-
-func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
-}
-func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
-}
-func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
-}
-
-// An enum, for completeness.
-type GoTest_KIND int32
-
-const (
- GoTest_VOID GoTest_KIND = 0
- // Basic types
- GoTest_BOOL GoTest_KIND = 1
- GoTest_BYTES GoTest_KIND = 2
- GoTest_FINGERPRINT GoTest_KIND = 3
- GoTest_FLOAT GoTest_KIND = 4
- GoTest_INT GoTest_KIND = 5
- GoTest_STRING GoTest_KIND = 6
- GoTest_TIME GoTest_KIND = 7
- // Groupings
- GoTest_TUPLE GoTest_KIND = 8
- GoTest_ARRAY GoTest_KIND = 9
- GoTest_MAP GoTest_KIND = 10
- // Table types
- GoTest_TABLE GoTest_KIND = 11
- // Functions
- GoTest_FUNCTION GoTest_KIND = 12
-)
-
-var GoTest_KIND_name = map[int32]string{
- 0: "VOID",
- 1: "BOOL",
- 2: "BYTES",
- 3: "FINGERPRINT",
- 4: "FLOAT",
- 5: "INT",
- 6: "STRING",
- 7: "TIME",
- 8: "TUPLE",
- 9: "ARRAY",
- 10: "MAP",
- 11: "TABLE",
- 12: "FUNCTION",
-}
-var GoTest_KIND_value = map[string]int32{
- "VOID": 0,
- "BOOL": 1,
- "BYTES": 2,
- "FINGERPRINT": 3,
- "FLOAT": 4,
- "INT": 5,
- "STRING": 6,
- "TIME": 7,
- "TUPLE": 8,
- "ARRAY": 9,
- "MAP": 10,
- "TABLE": 11,
- "FUNCTION": 12,
-}
-
-func (x GoTest_KIND) Enum() *GoTest_KIND {
- p := new(GoTest_KIND)
- *p = x
- return p
-}
-func (x GoTest_KIND) String() string {
- return proto.EnumName(GoTest_KIND_name, int32(x))
-}
-func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
- if err != nil {
- return err
- }
- *x = GoTest_KIND(value)
- return nil
-}
-
-type MyMessage_Color int32
-
-const (
- MyMessage_RED MyMessage_Color = 0
- MyMessage_GREEN MyMessage_Color = 1
- MyMessage_BLUE MyMessage_Color = 2
-)
-
-var MyMessage_Color_name = map[int32]string{
- 0: "RED",
- 1: "GREEN",
- 2: "BLUE",
-}
-var MyMessage_Color_value = map[string]int32{
- "RED": 0,
- "GREEN": 1,
- "BLUE": 2,
-}
-
-func (x MyMessage_Color) Enum() *MyMessage_Color {
- p := new(MyMessage_Color)
- *p = x
- return p
-}
-func (x MyMessage_Color) String() string {
- return proto.EnumName(MyMessage_Color_name, int32(x))
-}
-func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
- if err != nil {
- return err
- }
- *x = MyMessage_Color(value)
- return nil
-}
-
-type Defaults_Color int32
-
-const (
- Defaults_RED Defaults_Color = 0
- Defaults_GREEN Defaults_Color = 1
- Defaults_BLUE Defaults_Color = 2
-)
-
-var Defaults_Color_name = map[int32]string{
- 0: "RED",
- 1: "GREEN",
- 2: "BLUE",
-}
-var Defaults_Color_value = map[string]int32{
- "RED": 0,
- "GREEN": 1,
- "BLUE": 2,
-}
-
-func (x Defaults_Color) Enum() *Defaults_Color {
- p := new(Defaults_Color)
- *p = x
- return p
-}
-func (x Defaults_Color) String() string {
- return proto.EnumName(Defaults_Color_name, int32(x))
-}
-func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
- if err != nil {
- return err
- }
- *x = Defaults_Color(value)
- return nil
-}
-
-type RepeatedEnum_Color int32
-
-const (
- RepeatedEnum_RED RepeatedEnum_Color = 1
-)
-
-var RepeatedEnum_Color_name = map[int32]string{
- 1: "RED",
-}
-var RepeatedEnum_Color_value = map[string]int32{
- "RED": 1,
-}
-
-func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
- p := new(RepeatedEnum_Color)
- *p = x
- return p
-}
-func (x RepeatedEnum_Color) String() string {
- return proto.EnumName(RepeatedEnum_Color_name, int32(x))
-}
-func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
- if err != nil {
- return err
- }
- *x = RepeatedEnum_Color(value)
- return nil
-}
-
-type GoEnum struct {
- Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoEnum) Reset() { *m = GoEnum{} }
-func (m *GoEnum) String() string { return proto.CompactTextString(m) }
-func (*GoEnum) ProtoMessage() {}
-
-func (m *GoEnum) GetFoo() FOO {
- if m != nil && m.Foo != nil {
- return *m.Foo
- }
- return FOO_FOO1
-}
-
-type GoTestField struct {
- Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"`
- Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTestField) Reset() { *m = GoTestField{} }
-func (m *GoTestField) String() string { return proto.CompactTextString(m) }
-func (*GoTestField) ProtoMessage() {}
-
-func (m *GoTestField) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
-}
-
-func (m *GoTestField) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-type GoTest struct {
- // Some typical parameters
- Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
- Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"`
- Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"`
- // Required, repeated and optional foreign fields.
- RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"`
- RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"`
- OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"`
- // Required fields of all basic types
- F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"`
- F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"`
- F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"`
- F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"`
- F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"`
- F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"`
- F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"`
- F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"`
- F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"`
- F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"`
- F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"`
- F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"`
- F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"`
- // Repeated fields of all basic types
- F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"`
- F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"`
- F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"`
- F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"`
- F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"`
- F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"`
- F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"`
- F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"`
- F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"`
- F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"`
- F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"`
- F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"`
- F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"`
- // Optional fields of all basic types
- F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"`
- F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"`
- F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"`
- F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"`
- F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"`
- F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"`
- F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"`
- F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"`
- F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"`
- F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"`
- F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"`
- F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"`
- F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"`
- // Default-valued fields of all basic types
- F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"`
- F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
- F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
- F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
- F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
- F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
- F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
- F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"`
- F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"`
- F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
- F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
- F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
- F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
- // Packed repeated fields (no string or bytes).
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"`
- F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"`
- F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"`
- F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"`
- F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"`
- F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"`
- F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"`
- F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"`
- F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"`
- F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"`
- F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"`
- Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"`
- Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"`
- Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest) Reset() { *m = GoTest{} }
-func (m *GoTest) String() string { return proto.CompactTextString(m) }
-func (*GoTest) ProtoMessage() {}
-
-const Default_GoTest_F_BoolDefaulted bool = true
-const Default_GoTest_F_Int32Defaulted int32 = 32
-const Default_GoTest_F_Int64Defaulted int64 = 64
-const Default_GoTest_F_Fixed32Defaulted uint32 = 320
-const Default_GoTest_F_Fixed64Defaulted uint64 = 640
-const Default_GoTest_F_Uint32Defaulted uint32 = 3200
-const Default_GoTest_F_Uint64Defaulted uint64 = 6400
-const Default_GoTest_F_FloatDefaulted float32 = 314159
-const Default_GoTest_F_DoubleDefaulted float64 = 271828
-const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
-
-var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
-
-const Default_GoTest_F_Sint32Defaulted int32 = -32
-const Default_GoTest_F_Sint64Defaulted int64 = -64
-
-func (m *GoTest) GetKind() GoTest_KIND {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return GoTest_VOID
-}
-
-func (m *GoTest) GetTable() string {
- if m != nil && m.Table != nil {
- return *m.Table
- }
- return ""
-}
-
-func (m *GoTest) GetParam() int32 {
- if m != nil && m.Param != nil {
- return *m.Param
- }
- return 0
-}
-
-func (m *GoTest) GetRequiredField() *GoTestField {
- if m != nil {
- return m.RequiredField
- }
- return nil
-}
-
-func (m *GoTest) GetRepeatedField() []*GoTestField {
- if m != nil {
- return m.RepeatedField
- }
- return nil
-}
-
-func (m *GoTest) GetOptionalField() *GoTestField {
- if m != nil {
- return m.OptionalField
- }
- return nil
-}
-
-func (m *GoTest) GetF_BoolRequired() bool {
- if m != nil && m.F_BoolRequired != nil {
- return *m.F_BoolRequired
- }
- return false
-}
-
-func (m *GoTest) GetF_Int32Required() int32 {
- if m != nil && m.F_Int32Required != nil {
- return *m.F_Int32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Int64Required() int64 {
- if m != nil && m.F_Int64Required != nil {
- return *m.F_Int64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed32Required() uint32 {
- if m != nil && m.F_Fixed32Required != nil {
- return *m.F_Fixed32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed64Required() uint64 {
- if m != nil && m.F_Fixed64Required != nil {
- return *m.F_Fixed64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint32Required() uint32 {
- if m != nil && m.F_Uint32Required != nil {
- return *m.F_Uint32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint64Required() uint64 {
- if m != nil && m.F_Uint64Required != nil {
- return *m.F_Uint64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_FloatRequired() float32 {
- if m != nil && m.F_FloatRequired != nil {
- return *m.F_FloatRequired
- }
- return 0
-}
-
-func (m *GoTest) GetF_DoubleRequired() float64 {
- if m != nil && m.F_DoubleRequired != nil {
- return *m.F_DoubleRequired
- }
- return 0
-}
-
-func (m *GoTest) GetF_StringRequired() string {
- if m != nil && m.F_StringRequired != nil {
- return *m.F_StringRequired
- }
- return ""
-}
-
-func (m *GoTest) GetF_BytesRequired() []byte {
- if m != nil {
- return m.F_BytesRequired
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Required() int32 {
- if m != nil && m.F_Sint32Required != nil {
- return *m.F_Sint32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Sint64Required() int64 {
- if m != nil && m.F_Sint64Required != nil {
- return *m.F_Sint64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_BoolRepeated() []bool {
- if m != nil {
- return m.F_BoolRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int32Repeated() []int32 {
- if m != nil {
- return m.F_Int32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int64Repeated() []int64 {
- if m != nil {
- return m.F_Int64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
- if m != nil {
- return m.F_Fixed32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
- if m != nil {
- return m.F_Fixed64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint32Repeated() []uint32 {
- if m != nil {
- return m.F_Uint32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint64Repeated() []uint64 {
- if m != nil {
- return m.F_Uint64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_FloatRepeated() []float32 {
- if m != nil {
- return m.F_FloatRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_DoubleRepeated() []float64 {
- if m != nil {
- return m.F_DoubleRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_StringRepeated() []string {
- if m != nil {
- return m.F_StringRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_BytesRepeated() [][]byte {
- if m != nil {
- return m.F_BytesRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Repeated() []int32 {
- if m != nil {
- return m.F_Sint32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint64Repeated() []int64 {
- if m != nil {
- return m.F_Sint64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_BoolOptional() bool {
- if m != nil && m.F_BoolOptional != nil {
- return *m.F_BoolOptional
- }
- return false
-}
-
-func (m *GoTest) GetF_Int32Optional() int32 {
- if m != nil && m.F_Int32Optional != nil {
- return *m.F_Int32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Int64Optional() int64 {
- if m != nil && m.F_Int64Optional != nil {
- return *m.F_Int64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed32Optional() uint32 {
- if m != nil && m.F_Fixed32Optional != nil {
- return *m.F_Fixed32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed64Optional() uint64 {
- if m != nil && m.F_Fixed64Optional != nil {
- return *m.F_Fixed64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint32Optional() uint32 {
- if m != nil && m.F_Uint32Optional != nil {
- return *m.F_Uint32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint64Optional() uint64 {
- if m != nil && m.F_Uint64Optional != nil {
- return *m.F_Uint64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_FloatOptional() float32 {
- if m != nil && m.F_FloatOptional != nil {
- return *m.F_FloatOptional
- }
- return 0
-}
-
-func (m *GoTest) GetF_DoubleOptional() float64 {
- if m != nil && m.F_DoubleOptional != nil {
- return *m.F_DoubleOptional
- }
- return 0
-}
-
-func (m *GoTest) GetF_StringOptional() string {
- if m != nil && m.F_StringOptional != nil {
- return *m.F_StringOptional
- }
- return ""
-}
-
-func (m *GoTest) GetF_BytesOptional() []byte {
- if m != nil {
- return m.F_BytesOptional
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Optional() int32 {
- if m != nil && m.F_Sint32Optional != nil {
- return *m.F_Sint32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Sint64Optional() int64 {
- if m != nil && m.F_Sint64Optional != nil {
- return *m.F_Sint64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_BoolDefaulted() bool {
- if m != nil && m.F_BoolDefaulted != nil {
- return *m.F_BoolDefaulted
- }
- return Default_GoTest_F_BoolDefaulted
-}
-
-func (m *GoTest) GetF_Int32Defaulted() int32 {
- if m != nil && m.F_Int32Defaulted != nil {
- return *m.F_Int32Defaulted
- }
- return Default_GoTest_F_Int32Defaulted
-}
-
-func (m *GoTest) GetF_Int64Defaulted() int64 {
- if m != nil && m.F_Int64Defaulted != nil {
- return *m.F_Int64Defaulted
- }
- return Default_GoTest_F_Int64Defaulted
-}
-
-func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
- if m != nil && m.F_Fixed32Defaulted != nil {
- return *m.F_Fixed32Defaulted
- }
- return Default_GoTest_F_Fixed32Defaulted
-}
-
-func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
- if m != nil && m.F_Fixed64Defaulted != nil {
- return *m.F_Fixed64Defaulted
- }
- return Default_GoTest_F_Fixed64Defaulted
-}
-
-func (m *GoTest) GetF_Uint32Defaulted() uint32 {
- if m != nil && m.F_Uint32Defaulted != nil {
- return *m.F_Uint32Defaulted
- }
- return Default_GoTest_F_Uint32Defaulted
-}
-
-func (m *GoTest) GetF_Uint64Defaulted() uint64 {
- if m != nil && m.F_Uint64Defaulted != nil {
- return *m.F_Uint64Defaulted
- }
- return Default_GoTest_F_Uint64Defaulted
-}
-
-func (m *GoTest) GetF_FloatDefaulted() float32 {
- if m != nil && m.F_FloatDefaulted != nil {
- return *m.F_FloatDefaulted
- }
- return Default_GoTest_F_FloatDefaulted
-}
-
-func (m *GoTest) GetF_DoubleDefaulted() float64 {
- if m != nil && m.F_DoubleDefaulted != nil {
- return *m.F_DoubleDefaulted
- }
- return Default_GoTest_F_DoubleDefaulted
-}
-
-func (m *GoTest) GetF_StringDefaulted() string {
- if m != nil && m.F_StringDefaulted != nil {
- return *m.F_StringDefaulted
- }
- return Default_GoTest_F_StringDefaulted
-}
-
-func (m *GoTest) GetF_BytesDefaulted() []byte {
- if m != nil && m.F_BytesDefaulted != nil {
- return m.F_BytesDefaulted
- }
- return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
-}
-
-func (m *GoTest) GetF_Sint32Defaulted() int32 {
- if m != nil && m.F_Sint32Defaulted != nil {
- return *m.F_Sint32Defaulted
- }
- return Default_GoTest_F_Sint32Defaulted
-}
-
-func (m *GoTest) GetF_Sint64Defaulted() int64 {
- if m != nil && m.F_Sint64Defaulted != nil {
- return *m.F_Sint64Defaulted
- }
- return Default_GoTest_F_Sint64Defaulted
-}
-
-func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
- if m != nil {
- return m.F_BoolRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
- if m != nil {
- return m.F_Int32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
- if m != nil {
- return m.F_Int64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
- if m != nil {
- return m.F_Fixed32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
- if m != nil {
- return m.F_Fixed64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
- if m != nil {
- return m.F_Uint32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
- if m != nil {
- return m.F_Uint64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
- if m != nil {
- return m.F_FloatRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
- if m != nil {
- return m.F_DoubleRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
- if m != nil {
- return m.F_Sint32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
- if m != nil {
- return m.F_Sint64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
- if m != nil {
- return m.Requiredgroup
- }
- return nil
-}
-
-func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
- if m != nil {
- return m.Repeatedgroup
- }
- return nil
-}
-
-func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
-}
-
-// Required, repeated, and optional groups.
-type GoTest_RequiredGroup struct {
- RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
-func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_RequiredGroup) ProtoMessage() {}
-
-func (m *GoTest_RequiredGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-type GoTest_RepeatedGroup struct {
- RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
-func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_RepeatedGroup) ProtoMessage() {}
-
-func (m *GoTest_RepeatedGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-type GoTest_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
-func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_OptionalGroup) ProtoMessage() {}
-
-func (m *GoTest_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-// For testing skipping of unrecognized fields.
-// Numbers are all big, larger than tag numbers in GoTestField,
-// the message used in the corresponding test.
-type GoSkipTest struct {
- SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"`
- SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"`
- SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"`
- SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"`
- Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
-func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
-func (*GoSkipTest) ProtoMessage() {}
-
-func (m *GoSkipTest) GetSkipInt32() int32 {
- if m != nil && m.SkipInt32 != nil {
- return *m.SkipInt32
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipFixed32() uint32 {
- if m != nil && m.SkipFixed32 != nil {
- return *m.SkipFixed32
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipFixed64() uint64 {
- if m != nil && m.SkipFixed64 != nil {
- return *m.SkipFixed64
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipString() string {
- if m != nil && m.SkipString != nil {
- return *m.SkipString
- }
- return ""
-}
-
-func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
- if m != nil {
- return m.Skipgroup
- }
- return nil
-}
-
-type GoSkipTest_SkipGroup struct {
- GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"`
- GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
-func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
-func (*GoSkipTest_SkipGroup) ProtoMessage() {}
-
-func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
- if m != nil && m.GroupInt32 != nil {
- return *m.GroupInt32
- }
- return 0
-}
-
-func (m *GoSkipTest_SkipGroup) GetGroupString() string {
- if m != nil && m.GroupString != nil {
- return *m.GroupString
- }
- return ""
-}
-
-// For testing packed/non-packed decoder switching.
-// A serialized instance of one should be deserializable as the other.
-type NonPackedTest struct {
- A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
-func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
-func (*NonPackedTest) ProtoMessage() {}
-
-func (m *NonPackedTest) GetA() []int32 {
- if m != nil {
- return m.A
- }
- return nil
-}
-
-type PackedTest struct {
- B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PackedTest) Reset() { *m = PackedTest{} }
-func (m *PackedTest) String() string { return proto.CompactTextString(m) }
-func (*PackedTest) ProtoMessage() {}
-
-func (m *PackedTest) GetB() []int32 {
- if m != nil {
- return m.B
- }
- return nil
-}
-
-type MaxTag struct {
- // Maximum possible tag number.
- LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MaxTag) Reset() { *m = MaxTag{} }
-func (m *MaxTag) String() string { return proto.CompactTextString(m) }
-func (*MaxTag) ProtoMessage() {}
-
-func (m *MaxTag) GetLastField() string {
- if m != nil && m.LastField != nil {
- return *m.LastField
- }
- return ""
-}
-
-type OldMessage struct {
- Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
- Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OldMessage) Reset() { *m = OldMessage{} }
-func (m *OldMessage) String() string { return proto.CompactTextString(m) }
-func (*OldMessage) ProtoMessage() {}
-
-func (m *OldMessage) GetNested() *OldMessage_Nested {
- if m != nil {
- return m.Nested
- }
- return nil
-}
-
-func (m *OldMessage) GetNum() int32 {
- if m != nil && m.Num != nil {
- return *m.Num
- }
- return 0
-}
-
-type OldMessage_Nested struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
-func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
-func (*OldMessage_Nested) ProtoMessage() {}
-
-func (m *OldMessage_Nested) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-// NewMessage is wire compatible with OldMessage;
-// imagine it as a future version.
-type NewMessage struct {
- Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
- // This is an int32 in OldMessage.
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NewMessage) Reset() { *m = NewMessage{} }
-func (m *NewMessage) String() string { return proto.CompactTextString(m) }
-func (*NewMessage) ProtoMessage() {}
-
-func (m *NewMessage) GetNested() *NewMessage_Nested {
- if m != nil {
- return m.Nested
- }
- return nil
-}
-
-func (m *NewMessage) GetNum() int64 {
- if m != nil && m.Num != nil {
- return *m.Num
- }
- return 0
-}
-
-type NewMessage_Nested struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
-func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
-func (*NewMessage_Nested) ProtoMessage() {}
-
-func (m *NewMessage_Nested) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *NewMessage_Nested) GetFoodGroup() string {
- if m != nil && m.FoodGroup != nil {
- return *m.FoodGroup
- }
- return ""
-}
-
-type InnerMessage struct {
- Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
- Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
- Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *InnerMessage) Reset() { *m = InnerMessage{} }
-func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
-func (*InnerMessage) ProtoMessage() {}
-
-const Default_InnerMessage_Port int32 = 4000
-
-func (m *InnerMessage) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *InnerMessage) GetPort() int32 {
- if m != nil && m.Port != nil {
- return *m.Port
- }
- return Default_InnerMessage_Port
-}
-
-func (m *InnerMessage) GetConnected() bool {
- if m != nil && m.Connected != nil {
- return *m.Connected
- }
- return false
-}
-
-type OtherMessage struct {
- Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
- Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OtherMessage) Reset() { *m = OtherMessage{} }
-func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
-func (*OtherMessage) ProtoMessage() {}
-
-func (m *OtherMessage) GetKey() int64 {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return 0
-}
-
-func (m *OtherMessage) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *OtherMessage) GetWeight() float32 {
- if m != nil && m.Weight != nil {
- return *m.Weight
- }
- return 0
-}
-
-func (m *OtherMessage) GetInner() *InnerMessage {
- if m != nil {
- return m.Inner
- }
- return nil
-}
-
-type MyMessage struct {
- Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
- Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
- Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
- Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
- Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
- Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
- RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"`
- Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
- Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"`
- // This field becomes [][]byte in the generated code.
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"`
- Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
- XXX_extensions map[int32]proto.Extension `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessage) Reset() { *m = MyMessage{} }
-func (m *MyMessage) String() string { return proto.CompactTextString(m) }
-func (*MyMessage) ProtoMessage() {}
-
-var extRange_MyMessage = []proto.ExtensionRange{
- {100, 536870911},
-}
-
-func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MyMessage
-}
-func (m *MyMessage) ExtensionMap() map[int32]proto.Extension {
- if m.XXX_extensions == nil {
- m.XXX_extensions = make(map[int32]proto.Extension)
- }
- return m.XXX_extensions
-}
-
-func (m *MyMessage) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *MyMessage) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MyMessage) GetQuote() string {
- if m != nil && m.Quote != nil {
- return *m.Quote
- }
- return ""
-}
-
-func (m *MyMessage) GetPet() []string {
- if m != nil {
- return m.Pet
- }
- return nil
-}
-
-func (m *MyMessage) GetInner() *InnerMessage {
- if m != nil {
- return m.Inner
- }
- return nil
-}
-
-func (m *MyMessage) GetOthers() []*OtherMessage {
- if m != nil {
- return m.Others
- }
- return nil
-}
-
-func (m *MyMessage) GetRepInner() []*InnerMessage {
- if m != nil {
- return m.RepInner
- }
- return nil
-}
-
-func (m *MyMessage) GetBikeshed() MyMessage_Color {
- if m != nil && m.Bikeshed != nil {
- return *m.Bikeshed
- }
- return MyMessage_RED
-}
-
-func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
- if m != nil {
- return m.Somegroup
- }
- return nil
-}
-
-func (m *MyMessage) GetRepBytes() [][]byte {
- if m != nil {
- return m.RepBytes
- }
- return nil
-}
-
-func (m *MyMessage) GetBigfloat() float64 {
- if m != nil && m.Bigfloat != nil {
- return *m.Bigfloat
- }
- return 0
-}
-
-type MyMessage_SomeGroup struct {
- GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
-func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
-func (*MyMessage_SomeGroup) ProtoMessage() {}
-
-func (m *MyMessage_SomeGroup) GetGroupField() int32 {
- if m != nil && m.GroupField != nil {
- return *m.GroupField
- }
- return 0
-}
-
-type Ext struct {
- Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Ext) Reset() { *m = Ext{} }
-func (m *Ext) String() string { return proto.CompactTextString(m) }
-func (*Ext) ProtoMessage() {}
-
-func (m *Ext) GetData() string {
- if m != nil && m.Data != nil {
- return *m.Data
- }
- return ""
-}
-
-var E_Ext_More = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*Ext)(nil),
- Field: 103,
- Name: "testdata.Ext.more",
- Tag: "bytes,103,opt,name=more",
-}
-
-var E_Ext_Text = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*string)(nil),
- Field: 104,
- Name: "testdata.Ext.text",
- Tag: "bytes,104,opt,name=text",
-}
-
-var E_Ext_Number = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 105,
- Name: "testdata.Ext.number",
- Tag: "varint,105,opt,name=number",
-}
-
-type MyMessageSet struct {
- XXX_extensions map[int32]proto.Extension `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
-func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
-func (*MyMessageSet) ProtoMessage() {}
-
-func (m *MyMessageSet) Marshal() ([]byte, error) {
- return proto.MarshalMessageSet(m.ExtensionMap())
-}
-func (m *MyMessageSet) Unmarshal(buf []byte) error {
- return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
-}
-func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
- return proto.MarshalMessageSetJSON(m.XXX_extensions)
-}
-func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
- return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
-}
-
-// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
-var _ proto.Marshaler = (*MyMessageSet)(nil)
-var _ proto.Unmarshaler = (*MyMessageSet)(nil)
-
-var extRange_MyMessageSet = []proto.ExtensionRange{
- {100, 2147483646},
-}
-
-func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MyMessageSet
-}
-func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension {
- if m.XXX_extensions == nil {
- m.XXX_extensions = make(map[int32]proto.Extension)
- }
- return m.XXX_extensions
-}
-
-type Empty struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-
-type MessageList struct {
- Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageList) Reset() { *m = MessageList{} }
-func (m *MessageList) String() string { return proto.CompactTextString(m) }
-func (*MessageList) ProtoMessage() {}
-
-func (m *MessageList) GetMessage() []*MessageList_Message {
- if m != nil {
- return m.Message
- }
- return nil
-}
-
-type MessageList_Message struct {
- Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
- Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
-func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
-func (*MessageList_Message) ProtoMessage() {}
-
-func (m *MessageList_Message) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MessageList_Message) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-type Strings struct {
- StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"`
- BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Strings) Reset() { *m = Strings{} }
-func (m *Strings) String() string { return proto.CompactTextString(m) }
-func (*Strings) ProtoMessage() {}
-
-func (m *Strings) GetStringField() string {
- if m != nil && m.StringField != nil {
- return *m.StringField
- }
- return ""
-}
-
-func (m *Strings) GetBytesField() []byte {
- if m != nil {
- return m.BytesField
- }
- return nil
-}
-
-type Defaults struct {
- // Default-valued fields of all basic types.
- // Same as GoTest, but copied here to make testing easier.
- F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"`
- F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"`
- F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"`
- F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"`
- F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"`
- F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"`
- F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"`
- F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"`
- F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"`
- F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"`
- F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"`
- F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"`
- F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"`
- F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
- // More fields with crazy defaults.
- F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"`
- F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"`
- F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"`
- // Sub-message.
- Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
- // Redundant but explicit defaults.
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Defaults) Reset() { *m = Defaults{} }
-func (m *Defaults) String() string { return proto.CompactTextString(m) }
-func (*Defaults) ProtoMessage() {}
-
-const Default_Defaults_F_Bool bool = true
-const Default_Defaults_F_Int32 int32 = 32
-const Default_Defaults_F_Int64 int64 = 64
-const Default_Defaults_F_Fixed32 uint32 = 320
-const Default_Defaults_F_Fixed64 uint64 = 640
-const Default_Defaults_F_Uint32 uint32 = 3200
-const Default_Defaults_F_Uint64 uint64 = 6400
-const Default_Defaults_F_Float float32 = 314159
-const Default_Defaults_F_Double float64 = 271828
-const Default_Defaults_F_String string = "hello, \"world!\"\n"
-
-var Default_Defaults_F_Bytes []byte = []byte("Bignose")
-
-const Default_Defaults_F_Sint32 int32 = -32
-const Default_Defaults_F_Sint64 int64 = -64
-const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
-
-var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
-var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
-var Default_Defaults_F_Nan float32 = float32(math.NaN())
-
-func (m *Defaults) GetF_Bool() bool {
- if m != nil && m.F_Bool != nil {
- return *m.F_Bool
- }
- return Default_Defaults_F_Bool
-}
-
-func (m *Defaults) GetF_Int32() int32 {
- if m != nil && m.F_Int32 != nil {
- return *m.F_Int32
- }
- return Default_Defaults_F_Int32
-}
-
-func (m *Defaults) GetF_Int64() int64 {
- if m != nil && m.F_Int64 != nil {
- return *m.F_Int64
- }
- return Default_Defaults_F_Int64
-}
-
-func (m *Defaults) GetF_Fixed32() uint32 {
- if m != nil && m.F_Fixed32 != nil {
- return *m.F_Fixed32
- }
- return Default_Defaults_F_Fixed32
-}
-
-func (m *Defaults) GetF_Fixed64() uint64 {
- if m != nil && m.F_Fixed64 != nil {
- return *m.F_Fixed64
- }
- return Default_Defaults_F_Fixed64
-}
-
-func (m *Defaults) GetF_Uint32() uint32 {
- if m != nil && m.F_Uint32 != nil {
- return *m.F_Uint32
- }
- return Default_Defaults_F_Uint32
-}
-
-func (m *Defaults) GetF_Uint64() uint64 {
- if m != nil && m.F_Uint64 != nil {
- return *m.F_Uint64
- }
- return Default_Defaults_F_Uint64
-}
-
-func (m *Defaults) GetF_Float() float32 {
- if m != nil && m.F_Float != nil {
- return *m.F_Float
- }
- return Default_Defaults_F_Float
-}
-
-func (m *Defaults) GetF_Double() float64 {
- if m != nil && m.F_Double != nil {
- return *m.F_Double
- }
- return Default_Defaults_F_Double
-}
-
-func (m *Defaults) GetF_String() string {
- if m != nil && m.F_String != nil {
- return *m.F_String
- }
- return Default_Defaults_F_String
-}
-
-func (m *Defaults) GetF_Bytes() []byte {
- if m != nil && m.F_Bytes != nil {
- return m.F_Bytes
- }
- return append([]byte(nil), Default_Defaults_F_Bytes...)
-}
-
-func (m *Defaults) GetF_Sint32() int32 {
- if m != nil && m.F_Sint32 != nil {
- return *m.F_Sint32
- }
- return Default_Defaults_F_Sint32
-}
-
-func (m *Defaults) GetF_Sint64() int64 {
- if m != nil && m.F_Sint64 != nil {
- return *m.F_Sint64
- }
- return Default_Defaults_F_Sint64
-}
-
-func (m *Defaults) GetF_Enum() Defaults_Color {
- if m != nil && m.F_Enum != nil {
- return *m.F_Enum
- }
- return Default_Defaults_F_Enum
-}
-
-func (m *Defaults) GetF_Pinf() float32 {
- if m != nil && m.F_Pinf != nil {
- return *m.F_Pinf
- }
- return Default_Defaults_F_Pinf
-}
-
-func (m *Defaults) GetF_Ninf() float32 {
- if m != nil && m.F_Ninf != nil {
- return *m.F_Ninf
- }
- return Default_Defaults_F_Ninf
-}
-
-func (m *Defaults) GetF_Nan() float32 {
- if m != nil && m.F_Nan != nil {
- return *m.F_Nan
- }
- return Default_Defaults_F_Nan
-}
-
-func (m *Defaults) GetSub() *SubDefaults {
- if m != nil {
- return m.Sub
- }
- return nil
-}
-
-func (m *Defaults) GetStrZero() string {
- if m != nil && m.StrZero != nil {
- return *m.StrZero
- }
- return ""
-}
-
-type SubDefaults struct {
- N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SubDefaults) Reset() { *m = SubDefaults{} }
-func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
-func (*SubDefaults) ProtoMessage() {}
-
-const Default_SubDefaults_N int64 = 7
-
-func (m *SubDefaults) GetN() int64 {
- if m != nil && m.N != nil {
- return *m.N
- }
- return Default_SubDefaults_N
-}
-
-type RepeatedEnum struct {
- Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
-func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
-func (*RepeatedEnum) ProtoMessage() {}
-
-func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
- if m != nil {
- return m.Color
- }
- return nil
-}
-
-type MoreRepeated struct {
- Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
- BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"`
- Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
- IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"`
- Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"`
- Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
- Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
-func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
-func (*MoreRepeated) ProtoMessage() {}
-
-func (m *MoreRepeated) GetBools() []bool {
- if m != nil {
- return m.Bools
- }
- return nil
-}
-
-func (m *MoreRepeated) GetBoolsPacked() []bool {
- if m != nil {
- return m.BoolsPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetInts() []int32 {
- if m != nil {
- return m.Ints
- }
- return nil
-}
-
-func (m *MoreRepeated) GetIntsPacked() []int32 {
- if m != nil {
- return m.IntsPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetInt64SPacked() []int64 {
- if m != nil {
- return m.Int64SPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetStrings() []string {
- if m != nil {
- return m.Strings
- }
- return nil
-}
-
-func (m *MoreRepeated) GetFixeds() []uint32 {
- if m != nil {
- return m.Fixeds
- }
- return nil
-}
-
-type GroupOld struct {
- G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupOld) Reset() { *m = GroupOld{} }
-func (m *GroupOld) String() string { return proto.CompactTextString(m) }
-func (*GroupOld) ProtoMessage() {}
-
-func (m *GroupOld) GetG() *GroupOld_G {
- if m != nil {
- return m.G
- }
- return nil
-}
-
-type GroupOld_G struct {
- X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
-func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
-func (*GroupOld_G) ProtoMessage() {}
-
-func (m *GroupOld_G) GetX() int32 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-type GroupNew struct {
- G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupNew) Reset() { *m = GroupNew{} }
-func (m *GroupNew) String() string { return proto.CompactTextString(m) }
-func (*GroupNew) ProtoMessage() {}
-
-func (m *GroupNew) GetG() *GroupNew_G {
- if m != nil {
- return m.G
- }
- return nil
-}
-
-type GroupNew_G struct {
- X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
- Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
-func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
-func (*GroupNew_G) ProtoMessage() {}
-
-func (m *GroupNew_G) GetX() int32 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *GroupNew_G) GetY() int32 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type FloatingPoint struct {
- F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
-func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
-func (*FloatingPoint) ProtoMessage() {}
-
-func (m *FloatingPoint) GetF() float64 {
- if m != nil && m.F != nil {
- return *m.F
- }
- return 0
-}
-
-type MessageWithMap struct {
- NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
-func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
-func (*MessageWithMap) ProtoMessage() {}
-
-func (m *MessageWithMap) GetNameMapping() map[int32]string {
- if m != nil {
- return m.NameMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
- if m != nil {
- return m.MsgMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
- if m != nil {
- return m.ByteMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetStrToStr() map[string]string {
- if m != nil {
- return m.StrToStr
- }
- return nil
-}
-
-var E_Greeting = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: ([]string)(nil),
- Field: 106,
- Name: "testdata.greeting",
- Tag: "bytes,106,rep,name=greeting",
-}
-
-var E_X201 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 201,
- Name: "testdata.x201",
- Tag: "bytes,201,opt,name=x201",
-}
-
-var E_X202 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 202,
- Name: "testdata.x202",
- Tag: "bytes,202,opt,name=x202",
-}
-
-var E_X203 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 203,
- Name: "testdata.x203",
- Tag: "bytes,203,opt,name=x203",
-}
-
-var E_X204 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 204,
- Name: "testdata.x204",
- Tag: "bytes,204,opt,name=x204",
-}
-
-var E_X205 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 205,
- Name: "testdata.x205",
- Tag: "bytes,205,opt,name=x205",
-}
-
-var E_X206 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 206,
- Name: "testdata.x206",
- Tag: "bytes,206,opt,name=x206",
-}
-
-var E_X207 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 207,
- Name: "testdata.x207",
- Tag: "bytes,207,opt,name=x207",
-}
-
-var E_X208 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 208,
- Name: "testdata.x208",
- Tag: "bytes,208,opt,name=x208",
-}
-
-var E_X209 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 209,
- Name: "testdata.x209",
- Tag: "bytes,209,opt,name=x209",
-}
-
-var E_X210 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 210,
- Name: "testdata.x210",
- Tag: "bytes,210,opt,name=x210",
-}
-
-var E_X211 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 211,
- Name: "testdata.x211",
- Tag: "bytes,211,opt,name=x211",
-}
-
-var E_X212 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 212,
- Name: "testdata.x212",
- Tag: "bytes,212,opt,name=x212",
-}
-
-var E_X213 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 213,
- Name: "testdata.x213",
- Tag: "bytes,213,opt,name=x213",
-}
-
-var E_X214 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 214,
- Name: "testdata.x214",
- Tag: "bytes,214,opt,name=x214",
-}
-
-var E_X215 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 215,
- Name: "testdata.x215",
- Tag: "bytes,215,opt,name=x215",
-}
-
-var E_X216 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 216,
- Name: "testdata.x216",
- Tag: "bytes,216,opt,name=x216",
-}
-
-var E_X217 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 217,
- Name: "testdata.x217",
- Tag: "bytes,217,opt,name=x217",
-}
-
-var E_X218 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 218,
- Name: "testdata.x218",
- Tag: "bytes,218,opt,name=x218",
-}
-
-var E_X219 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 219,
- Name: "testdata.x219",
- Tag: "bytes,219,opt,name=x219",
-}
-
-var E_X220 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 220,
- Name: "testdata.x220",
- Tag: "bytes,220,opt,name=x220",
-}
-
-var E_X221 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 221,
- Name: "testdata.x221",
- Tag: "bytes,221,opt,name=x221",
-}
-
-var E_X222 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 222,
- Name: "testdata.x222",
- Tag: "bytes,222,opt,name=x222",
-}
-
-var E_X223 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 223,
- Name: "testdata.x223",
- Tag: "bytes,223,opt,name=x223",
-}
-
-var E_X224 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 224,
- Name: "testdata.x224",
- Tag: "bytes,224,opt,name=x224",
-}
-
-var E_X225 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 225,
- Name: "testdata.x225",
- Tag: "bytes,225,opt,name=x225",
-}
-
-var E_X226 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 226,
- Name: "testdata.x226",
- Tag: "bytes,226,opt,name=x226",
-}
-
-var E_X227 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 227,
- Name: "testdata.x227",
- Tag: "bytes,227,opt,name=x227",
-}
-
-var E_X228 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 228,
- Name: "testdata.x228",
- Tag: "bytes,228,opt,name=x228",
-}
-
-var E_X229 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 229,
- Name: "testdata.x229",
- Tag: "bytes,229,opt,name=x229",
-}
-
-var E_X230 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 230,
- Name: "testdata.x230",
- Tag: "bytes,230,opt,name=x230",
-}
-
-var E_X231 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 231,
- Name: "testdata.x231",
- Tag: "bytes,231,opt,name=x231",
-}
-
-var E_X232 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 232,
- Name: "testdata.x232",
- Tag: "bytes,232,opt,name=x232",
-}
-
-var E_X233 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 233,
- Name: "testdata.x233",
- Tag: "bytes,233,opt,name=x233",
-}
-
-var E_X234 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 234,
- Name: "testdata.x234",
- Tag: "bytes,234,opt,name=x234",
-}
-
-var E_X235 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 235,
- Name: "testdata.x235",
- Tag: "bytes,235,opt,name=x235",
-}
-
-var E_X236 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 236,
- Name: "testdata.x236",
- Tag: "bytes,236,opt,name=x236",
-}
-
-var E_X237 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 237,
- Name: "testdata.x237",
- Tag: "bytes,237,opt,name=x237",
-}
-
-var E_X238 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 238,
- Name: "testdata.x238",
- Tag: "bytes,238,opt,name=x238",
-}
-
-var E_X239 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 239,
- Name: "testdata.x239",
- Tag: "bytes,239,opt,name=x239",
-}
-
-var E_X240 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 240,
- Name: "testdata.x240",
- Tag: "bytes,240,opt,name=x240",
-}
-
-var E_X241 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 241,
- Name: "testdata.x241",
- Tag: "bytes,241,opt,name=x241",
-}
-
-var E_X242 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 242,
- Name: "testdata.x242",
- Tag: "bytes,242,opt,name=x242",
-}
-
-var E_X243 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 243,
- Name: "testdata.x243",
- Tag: "bytes,243,opt,name=x243",
-}
-
-var E_X244 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 244,
- Name: "testdata.x244",
- Tag: "bytes,244,opt,name=x244",
-}
-
-var E_X245 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 245,
- Name: "testdata.x245",
- Tag: "bytes,245,opt,name=x245",
-}
-
-var E_X246 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 246,
- Name: "testdata.x246",
- Tag: "bytes,246,opt,name=x246",
-}
-
-var E_X247 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 247,
- Name: "testdata.x247",
- Tag: "bytes,247,opt,name=x247",
-}
-
-var E_X248 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 248,
- Name: "testdata.x248",
- Tag: "bytes,248,opt,name=x248",
-}
-
-var E_X249 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 249,
- Name: "testdata.x249",
- Tag: "bytes,249,opt,name=x249",
-}
-
-var E_X250 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 250,
- Name: "testdata.x250",
- Tag: "bytes,250,opt,name=x250",
-}
-
-func init() {
- proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
- proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
- proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
- proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
- proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
- proto.RegisterExtension(E_Ext_More)
- proto.RegisterExtension(E_Ext_Text)
- proto.RegisterExtension(E_Ext_Number)
- proto.RegisterExtension(E_Greeting)
- proto.RegisterExtension(E_X201)
- proto.RegisterExtension(E_X202)
- proto.RegisterExtension(E_X203)
- proto.RegisterExtension(E_X204)
- proto.RegisterExtension(E_X205)
- proto.RegisterExtension(E_X206)
- proto.RegisterExtension(E_X207)
- proto.RegisterExtension(E_X208)
- proto.RegisterExtension(E_X209)
- proto.RegisterExtension(E_X210)
- proto.RegisterExtension(E_X211)
- proto.RegisterExtension(E_X212)
- proto.RegisterExtension(E_X213)
- proto.RegisterExtension(E_X214)
- proto.RegisterExtension(E_X215)
- proto.RegisterExtension(E_X216)
- proto.RegisterExtension(E_X217)
- proto.RegisterExtension(E_X218)
- proto.RegisterExtension(E_X219)
- proto.RegisterExtension(E_X220)
- proto.RegisterExtension(E_X221)
- proto.RegisterExtension(E_X222)
- proto.RegisterExtension(E_X223)
- proto.RegisterExtension(E_X224)
- proto.RegisterExtension(E_X225)
- proto.RegisterExtension(E_X226)
- proto.RegisterExtension(E_X227)
- proto.RegisterExtension(E_X228)
- proto.RegisterExtension(E_X229)
- proto.RegisterExtension(E_X230)
- proto.RegisterExtension(E_X231)
- proto.RegisterExtension(E_X232)
- proto.RegisterExtension(E_X233)
- proto.RegisterExtension(E_X234)
- proto.RegisterExtension(E_X235)
- proto.RegisterExtension(E_X236)
- proto.RegisterExtension(E_X237)
- proto.RegisterExtension(E_X238)
- proto.RegisterExtension(E_X239)
- proto.RegisterExtension(E_X240)
- proto.RegisterExtension(E_X241)
- proto.RegisterExtension(E_X242)
- proto.RegisterExtension(E_X243)
- proto.RegisterExtension(E_X244)
- proto.RegisterExtension(E_X245)
- proto.RegisterExtension(E_X246)
- proto.RegisterExtension(E_X247)
- proto.RegisterExtension(E_X248)
- proto.RegisterExtension(E_X249)
- proto.RegisterExtension(E_X250)
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
deleted file mode 100644
index 411634d82f..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
+++ /dev/null
@@ -1,435 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A feature-rich test file for the protocol compiler and libraries.
-
-syntax = "proto2";
-
-package testdata;
-
-enum FOO { FOO1 = 1; };
-
-message GoEnum {
- required FOO foo = 1;
-}
-
-message GoTestField {
- required string Label = 1;
- required string Type = 2;
-}
-
-message GoTest {
- // An enum, for completeness.
- enum KIND {
- VOID = 0;
-
- // Basic types
- BOOL = 1;
- BYTES = 2;
- FINGERPRINT = 3;
- FLOAT = 4;
- INT = 5;
- STRING = 6;
- TIME = 7;
-
- // Groupings
- TUPLE = 8;
- ARRAY = 9;
- MAP = 10;
-
- // Table types
- TABLE = 11;
-
- // Functions
- FUNCTION = 12; // last tag
- };
-
- // Some typical parameters
- required KIND Kind = 1;
- optional string Table = 2;
- optional int32 Param = 3;
-
- // Required, repeated and optional foreign fields.
- required GoTestField RequiredField = 4;
- repeated GoTestField RepeatedField = 5;
- optional GoTestField OptionalField = 6;
-
- // Required fields of all basic types
- required bool F_Bool_required = 10;
- required int32 F_Int32_required = 11;
- required int64 F_Int64_required = 12;
- required fixed32 F_Fixed32_required = 13;
- required fixed64 F_Fixed64_required = 14;
- required uint32 F_Uint32_required = 15;
- required uint64 F_Uint64_required = 16;
- required float F_Float_required = 17;
- required double F_Double_required = 18;
- required string F_String_required = 19;
- required bytes F_Bytes_required = 101;
- required sint32 F_Sint32_required = 102;
- required sint64 F_Sint64_required = 103;
-
- // Repeated fields of all basic types
- repeated bool F_Bool_repeated = 20;
- repeated int32 F_Int32_repeated = 21;
- repeated int64 F_Int64_repeated = 22;
- repeated fixed32 F_Fixed32_repeated = 23;
- repeated fixed64 F_Fixed64_repeated = 24;
- repeated uint32 F_Uint32_repeated = 25;
- repeated uint64 F_Uint64_repeated = 26;
- repeated float F_Float_repeated = 27;
- repeated double F_Double_repeated = 28;
- repeated string F_String_repeated = 29;
- repeated bytes F_Bytes_repeated = 201;
- repeated sint32 F_Sint32_repeated = 202;
- repeated sint64 F_Sint64_repeated = 203;
-
- // Optional fields of all basic types
- optional bool F_Bool_optional = 30;
- optional int32 F_Int32_optional = 31;
- optional int64 F_Int64_optional = 32;
- optional fixed32 F_Fixed32_optional = 33;
- optional fixed64 F_Fixed64_optional = 34;
- optional uint32 F_Uint32_optional = 35;
- optional uint64 F_Uint64_optional = 36;
- optional float F_Float_optional = 37;
- optional double F_Double_optional = 38;
- optional string F_String_optional = 39;
- optional bytes F_Bytes_optional = 301;
- optional sint32 F_Sint32_optional = 302;
- optional sint64 F_Sint64_optional = 303;
-
- // Default-valued fields of all basic types
- optional bool F_Bool_defaulted = 40 [default=true];
- optional int32 F_Int32_defaulted = 41 [default=32];
- optional int64 F_Int64_defaulted = 42 [default=64];
- optional fixed32 F_Fixed32_defaulted = 43 [default=320];
- optional fixed64 F_Fixed64_defaulted = 44 [default=640];
- optional uint32 F_Uint32_defaulted = 45 [default=3200];
- optional uint64 F_Uint64_defaulted = 46 [default=6400];
- optional float F_Float_defaulted = 47 [default=314159.];
- optional double F_Double_defaulted = 48 [default=271828.];
- optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
- optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
- optional sint32 F_Sint32_defaulted = 402 [default = -32];
- optional sint64 F_Sint64_defaulted = 403 [default = -64];
-
- // Packed repeated fields (no string or bytes).
- repeated bool F_Bool_repeated_packed = 50 [packed=true];
- repeated int32 F_Int32_repeated_packed = 51 [packed=true];
- repeated int64 F_Int64_repeated_packed = 52 [packed=true];
- repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
- repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
- repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
- repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
- repeated float F_Float_repeated_packed = 57 [packed=true];
- repeated double F_Double_repeated_packed = 58 [packed=true];
- repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
- repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
-
- // Required, repeated, and optional groups.
- required group RequiredGroup = 70 {
- required string RequiredField = 71;
- };
-
- repeated group RepeatedGroup = 80 {
- required string RequiredField = 81;
- };
-
- optional group OptionalGroup = 90 {
- required string RequiredField = 91;
- };
-}
-
-// For testing skipping of unrecognized fields.
-// Numbers are all big, larger than tag numbers in GoTestField,
-// the message used in the corresponding test.
-message GoSkipTest {
- required int32 skip_int32 = 11;
- required fixed32 skip_fixed32 = 12;
- required fixed64 skip_fixed64 = 13;
- required string skip_string = 14;
- required group SkipGroup = 15 {
- required int32 group_int32 = 16;
- required string group_string = 17;
- }
-}
-
-// For testing packed/non-packed decoder switching.
-// A serialized instance of one should be deserializable as the other.
-message NonPackedTest {
- repeated int32 a = 1;
-}
-
-message PackedTest {
- repeated int32 b = 1 [packed=true];
-}
-
-message MaxTag {
- // Maximum possible tag number.
- optional string last_field = 536870911;
-}
-
-message OldMessage {
- message Nested {
- optional string name = 1;
- }
- optional Nested nested = 1;
-
- optional int32 num = 2;
-}
-
-// NewMessage is wire compatible with OldMessage;
-// imagine it as a future version.
-message NewMessage {
- message Nested {
- optional string name = 1;
- optional string food_group = 2;
- }
- optional Nested nested = 1;
-
- // This is an int32 in OldMessage.
- optional int64 num = 2;
-}
-
-// Smaller tests for ASCII formatting.
-
-message InnerMessage {
- required string host = 1;
- optional int32 port = 2 [default=4000];
- optional bool connected = 3;
-}
-
-message OtherMessage {
- optional int64 key = 1;
- optional bytes value = 2;
- optional float weight = 3;
- optional InnerMessage inner = 4;
-}
-
-message MyMessage {
- required int32 count = 1;
- optional string name = 2;
- optional string quote = 3;
- repeated string pet = 4;
- optional InnerMessage inner = 5;
- repeated OtherMessage others = 6;
- repeated InnerMessage rep_inner = 12;
-
- enum Color {
- RED = 0;
- GREEN = 1;
- BLUE = 2;
- };
- optional Color bikeshed = 7;
-
- optional group SomeGroup = 8 {
- optional int32 group_field = 9;
- }
-
- // This field becomes [][]byte in the generated code.
- repeated bytes rep_bytes = 10;
-
- optional double bigfloat = 11;
-
- extensions 100 to max;
-}
-
-message Ext {
- extend MyMessage {
- optional Ext more = 103;
- optional string text = 104;
- optional int32 number = 105;
- }
-
- optional string data = 1;
-}
-
-extend MyMessage {
- repeated string greeting = 106;
-}
-
-message MyMessageSet {
- option message_set_wire_format = true;
- extensions 100 to max;
-}
-
-message Empty {
-}
-
-extend MyMessageSet {
- optional Empty x201 = 201;
- optional Empty x202 = 202;
- optional Empty x203 = 203;
- optional Empty x204 = 204;
- optional Empty x205 = 205;
- optional Empty x206 = 206;
- optional Empty x207 = 207;
- optional Empty x208 = 208;
- optional Empty x209 = 209;
- optional Empty x210 = 210;
- optional Empty x211 = 211;
- optional Empty x212 = 212;
- optional Empty x213 = 213;
- optional Empty x214 = 214;
- optional Empty x215 = 215;
- optional Empty x216 = 216;
- optional Empty x217 = 217;
- optional Empty x218 = 218;
- optional Empty x219 = 219;
- optional Empty x220 = 220;
- optional Empty x221 = 221;
- optional Empty x222 = 222;
- optional Empty x223 = 223;
- optional Empty x224 = 224;
- optional Empty x225 = 225;
- optional Empty x226 = 226;
- optional Empty x227 = 227;
- optional Empty x228 = 228;
- optional Empty x229 = 229;
- optional Empty x230 = 230;
- optional Empty x231 = 231;
- optional Empty x232 = 232;
- optional Empty x233 = 233;
- optional Empty x234 = 234;
- optional Empty x235 = 235;
- optional Empty x236 = 236;
- optional Empty x237 = 237;
- optional Empty x238 = 238;
- optional Empty x239 = 239;
- optional Empty x240 = 240;
- optional Empty x241 = 241;
- optional Empty x242 = 242;
- optional Empty x243 = 243;
- optional Empty x244 = 244;
- optional Empty x245 = 245;
- optional Empty x246 = 246;
- optional Empty x247 = 247;
- optional Empty x248 = 248;
- optional Empty x249 = 249;
- optional Empty x250 = 250;
-}
-
-message MessageList {
- repeated group Message = 1 {
- required string name = 2;
- required int32 count = 3;
- }
-}
-
-message Strings {
- optional string string_field = 1;
- optional bytes bytes_field = 2;
-}
-
-message Defaults {
- enum Color {
- RED = 0;
- GREEN = 1;
- BLUE = 2;
- }
-
- // Default-valued fields of all basic types.
- // Same as GoTest, but copied here to make testing easier.
- optional bool F_Bool = 1 [default=true];
- optional int32 F_Int32 = 2 [default=32];
- optional int64 F_Int64 = 3 [default=64];
- optional fixed32 F_Fixed32 = 4 [default=320];
- optional fixed64 F_Fixed64 = 5 [default=640];
- optional uint32 F_Uint32 = 6 [default=3200];
- optional uint64 F_Uint64 = 7 [default=6400];
- optional float F_Float = 8 [default=314159.];
- optional double F_Double = 9 [default=271828.];
- optional string F_String = 10 [default="hello, \"world!\"\n"];
- optional bytes F_Bytes = 11 [default="Bignose"];
- optional sint32 F_Sint32 = 12 [default=-32];
- optional sint64 F_Sint64 = 13 [default=-64];
- optional Color F_Enum = 14 [default=GREEN];
-
- // More fields with crazy defaults.
- optional float F_Pinf = 15 [default=inf];
- optional float F_Ninf = 16 [default=-inf];
- optional float F_Nan = 17 [default=nan];
-
- // Sub-message.
- optional SubDefaults sub = 18;
-
- // Redundant but explicit defaults.
- optional string str_zero = 19 [default=""];
-}
-
-message SubDefaults {
- optional int64 n = 1 [default=7];
-}
-
-message RepeatedEnum {
- enum Color {
- RED = 1;
- }
- repeated Color color = 1;
-}
-
-message MoreRepeated {
- repeated bool bools = 1;
- repeated bool bools_packed = 2 [packed=true];
- repeated int32 ints = 3;
- repeated int32 ints_packed = 4 [packed=true];
- repeated int64 int64s_packed = 7 [packed=true];
- repeated string strings = 5;
- repeated fixed32 fixeds = 6;
-}
-
-// GroupOld and GroupNew have the same wire format.
-// GroupNew has a new field inside a group.
-
-message GroupOld {
- optional group G = 101 {
- optional int32 x = 2;
- }
-}
-
-message GroupNew {
- optional group G = 101 {
- optional int32 x = 2;
- optional int32 y = 3;
- }
-}
-
-message FloatingPoint {
- required double f = 1;
-}
-
-message MessageWithMap {
- map<int32, string> name_mapping = 1;
- map<sint64, FloatingPoint> msg_mapping = 2;
- map<bool, bytes> byte_mapping = 3;
- map<string, string> str_to_str = 4;
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
index 720eac4705..81e1bdf386 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
@@ -37,11 +37,11 @@ import (
"bufio"
"bytes"
"encoding"
+ "errors"
"fmt"
"io"
"log"
"math"
- "os"
"reflect"
"sort"
"strings"
@@ -246,7 +246,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys() // TODO: should we sort these for deterministic output?
+ keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
@@ -283,20 +283,23 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
if err := w.WriteByte('\n'); err != nil {
return err
}
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
return err
}
- }
- if err := writeAny(w, val, props.mvalprop); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
}
// close struct
w.unindent()
@@ -315,26 +318,34 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
- switch fv.Kind() {
- case reflect.Bool:
- if !fv.Bool() {
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
continue
}
- case reflect.Int32, reflect.Int64:
- if fv.Int() == 0 {
- continue
- }
- case reflect.Uint32, reflect.Uint64:
- if fv.Uint() == 0 {
- continue
- }
- case reflect.Float32, reflect.Float64:
- if fv.Float() == 0 {
- continue
- }
- case reflect.String:
- if fv.String() == "" {
- continue
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
}
}
}
@@ -666,10 +677,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
pb, err := GetExtension(ep, desc)
if err != nil {
- if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
- return err
- }
- continue
+ return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
index d1caeff5d9..8bce369204 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
@@ -174,7 +174,7 @@ func (p *textParser) advance() {
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
- p.errorf("invalid quoted string %v", p.s[0:i+1])
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
@@ -385,8 +385,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
- sprops := GetProperties(st)
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
@@ -438,7 +437,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
- reqCount := GetProperties(st).reqCount
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
@@ -520,99 +520,113 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
- } else {
- // This is a normal, non-extension field.
- name := tok.value
- fi, props, ok := structFieldByName(st, name)
- if !ok {
- return p.errorf("unknown field name %q in %v", name, st)
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
}
+ continue
+ }
- dst := sv.Field(fi)
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // Technically the "key" and "value" could come in any order,
- // but in practice they won't.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- if err := p.consumeToken("key"); err != nil {
- return err
- }
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.mkeyprop); err != nil {
- return err
- }
- if err := p.consumeToken("value"); err != nil {
- return err
- }
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.mvalprop); err != nil {
- return err
- }
- if err := p.consumeToken(terminator); err != nil {
- return err
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- } else if props.Required {
- reqCount--
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
}
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
}
- // For backward compatibility, permit a semicolon or comma after a field.
- tok = p.next()
- if tok.err != nil {
- return tok.err
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
}
- if tok.value != ";" && tok.value != "," {
- p.back()
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
}
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
}
if reqCount > 0 {
@@ -621,6 +635,19 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr
}
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
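
The rewritten readStruct above funnels all separator handling through the new consumeOptionalSeparator helper, so a trailing ';' or ',' is now also tolerated after the key and value entries inside a map field, not just after ordinary fields. A minimal sketch of input the relaxed parser should accept, assuming the vendored testdata package and its MessageWithMap type (used here purely for illustration):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata" // illustrative test-message package
    )

    func main() {
        // Optional separators after the map entry's key and value.
        in := `name_mapping:<key:1234; value:"Feist",> name_mapping:<key:1 value:"Beatles">`
        m := new(pb.MessageWithMap)
        if err := proto.UnmarshalText(in, m); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(m.NameMapping[1234], m.NameMapping[1]) // Feist Beatles
    }
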
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
deleted file mode 100644
index 2e51da30c9..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "math"
- "reflect"
- "testing"
-
- . "github.com/golang/protobuf/proto"
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- . "github.com/golang/protobuf/proto/testdata"
-)
-
-type UnmarshalTextTest struct {
- in string
- err string // if "", no error expected
- out *MyMessage
-}
-
-func buildExtStructTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- SetExtension(msg, E_Ext_More, &Ext{
- Data: String("Hello, world!"),
- })
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-func buildExtDataTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- SetExtension(msg, E_Ext_Text, String("Hello, world!"))
- SetExtension(msg, E_Ext_Number, Int32(1729))
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-func buildExtRepStringTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
- panic(err)
- }
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-var unMarshalTextTests = []UnmarshalTextTest{
- // Basic
- {
- in: " count:42\n name:\"Dave\" ",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("Dave"),
- },
- },
-
- // Empty quoted string
- {
- in: `count:42 name:""`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(""),
- },
- },
-
- // Quoted string concatenation
- {
- in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("My name is elsewhere"),
- },
- },
-
- // Quoted string with escaped apostrophe
- {
- in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("HOLIDAY - New Year's Day"),
- },
- },
-
- // Quoted string with single quote
- {
- in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(`Roger "The Ramster" Ramjet`),
- },
- },
-
- // Quoted string with all the accepted special characters from the C++ test
- {
- in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
- },
- },
-
- // Quoted string with quoted backslash
- {
- in: `count:42 name: "\\'xyz"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(`\'xyz`),
- },
- },
-
- // Quoted string with UTF-8 bytes.
- {
- in: "count:42 name: '\303\277\302\201\xAB'",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("\303\277\302\201\xAB"),
- },
- },
-
- // Bad quoted string
- {
- in: `inner: < host: "\0" >` + "\n",
- err: `line 1.15: invalid quoted string "\0"`,
- },
-
- // Number too large for int64
- {
- in: "count: 1 others { key: 123456789012345678901 }",
- err: "line 1.23: invalid int64: 123456789012345678901",
- },
-
- // Number too large for int32
- {
- in: "count: 1234567890123",
- err: "line 1.7: invalid int32: 1234567890123",
- },
-
- // Number in hexadecimal
- {
- in: "count: 0x2beef",
- out: &MyMessage{
- Count: Int32(0x2beef),
- },
- },
-
- // Number in octal
- {
- in: "count: 024601",
- out: &MyMessage{
- Count: Int32(024601),
- },
- },
-
- // Floating point number with "f" suffix
- {
- in: "count: 4 others:< weight: 17.0f >",
- out: &MyMessage{
- Count: Int32(4),
- Others: []*OtherMessage{
- {
- Weight: Float32(17),
- },
- },
- },
- },
-
- // Floating point positive infinity
- {
- in: "count: 4 bigfloat: inf",
- out: &MyMessage{
- Count: Int32(4),
- Bigfloat: Float64(math.Inf(1)),
- },
- },
-
- // Floating point negative infinity
- {
- in: "count: 4 bigfloat: -inf",
- out: &MyMessage{
- Count: Int32(4),
- Bigfloat: Float64(math.Inf(-1)),
- },
- },
-
- // Number too large for float32
- {
- in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
- err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
- },
-
- // Number posing as a quoted string
- {
- in: `inner: < host: 12 >` + "\n",
- err: `line 1.15: invalid string: 12`,
- },
-
- // Quoted string posing as int32
- {
- in: `count: "12"`,
- err: `line 1.7: invalid int32: "12"`,
- },
-
- // Quoted string posing a float32
- {
- in: `others:< weight: "17.4" >`,
- err: `line 1.17: invalid float32: "17.4"`,
- },
-
- // Enum
- {
- in: `count:42 bikeshed: BLUE`,
- out: &MyMessage{
- Count: Int32(42),
- Bikeshed: MyMessage_BLUE.Enum(),
- },
- },
-
- // Repeated field
- {
- in: `count:42 pet: "horsey" pet:"bunny"`,
- out: &MyMessage{
- Count: Int32(42),
- Pet: []string{"horsey", "bunny"},
- },
- },
-
- // Repeated message with/without colon and <>/{}
- {
- in: `count:42 others:{} others{} others:<> others:{}`,
- out: &MyMessage{
- Count: Int32(42),
- Others: []*OtherMessage{
- {},
- {},
- {},
- {},
- },
- },
- },
-
- // Missing colon for inner message
- {
- in: `count:42 inner < host: "cauchy.syd" >`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("cauchy.syd"),
- },
- },
- },
-
- // Missing colon for string field
- {
- in: `name "Dave"`,
- err: `line 1.5: expected ':', found "\"Dave\""`,
- },
-
- // Missing colon for int32 field
- {
- in: `count 42`,
- err: `line 1.6: expected ':', found "42"`,
- },
-
- // Missing required field
- {
- in: `name: "Pawel"`,
- err: `proto: required field "testdata.MyMessage.count" not set`,
- out: &MyMessage{
- Name: String("Pawel"),
- },
- },
-
- // Repeated non-repeated field
- {
- in: `name: "Rob" name: "Russ"`,
- err: `line 1.12: non-repeated field "name" was repeated`,
- },
-
- // Group
- {
- in: `count: 17 SomeGroup { group_field: 12 }`,
- out: &MyMessage{
- Count: Int32(17),
- Somegroup: &MyMessage_SomeGroup{
- GroupField: Int32(12),
- },
- },
- },
-
- // Semicolon between fields
- {
- in: `count:3;name:"Calvin"`,
- out: &MyMessage{
- Count: Int32(3),
- Name: String("Calvin"),
- },
- },
- // Comma between fields
- {
- in: `count:4,name:"Ezekiel"`,
- out: &MyMessage{
- Count: Int32(4),
- Name: String("Ezekiel"),
- },
- },
-
- // Extension
- buildExtStructTest(`count: 42 [testdata.Ext.more]:`),
- buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
- buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
- buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
-
- // Big all-in-one
- {
- in: "count:42 # Meaning\n" +
- `name:"Dave" ` +
- `quote:"\"I didn't want to go.\"" ` +
- `pet:"bunny" ` +
- `pet:"kitty" ` +
- `pet:"horsey" ` +
- `inner:<` +
- ` host:"footrest.syd" ` +
- ` port:7001 ` +
- ` connected:true ` +
- `> ` +
- `others:<` +
- ` key:3735928559 ` +
- ` value:"\x01A\a\f" ` +
- `> ` +
- `others:<` +
- " weight:58.9 # Atomic weight of Co\n" +
- ` inner:<` +
- ` host:"lesha.mtv" ` +
- ` port:8002 ` +
- ` >` +
- `>`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("Dave"),
- Quote: String(`"I didn't want to go."`),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &InnerMessage{
- Host: String("footrest.syd"),
- Port: Int32(7001),
- Connected: Bool(true),
- },
- Others: []*OtherMessage{
- {
- Key: Int64(3735928559),
- Value: []byte{0x1, 'A', '\a', '\f'},
- },
- {
- Weight: Float32(58.9),
- Inner: &InnerMessage{
- Host: String("lesha.mtv"),
- Port: Int32(8002),
- },
- },
- },
- },
- },
-}
-
-func TestUnmarshalText(t *testing.T) {
- for i, test := range unMarshalTextTests {
- pb := new(MyMessage)
- err := UnmarshalText(test.in, pb)
- if test.err == "" {
- // We don't expect failure.
- if err != nil {
- t.Errorf("Test %d: Unexpected error: %v", i, err)
- } else if !reflect.DeepEqual(pb, test.out) {
- t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
- i, pb, test.out)
- }
- } else {
- // We do expect failure.
- if err == nil {
- t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
- } else if err.Error() != test.err {
- t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
- i, err.Error(), test.err)
- } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
- t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
- i, pb, test.out)
- }
- }
- }
-}
-
-func TestUnmarshalTextCustomMessage(t *testing.T) {
- msg := &textMessage{}
- if err := UnmarshalText("custom", msg); err != nil {
- t.Errorf("Unexpected error from custom unmarshal: %v", err)
- }
- if UnmarshalText("not custom", msg) == nil {
- t.Errorf("Didn't get expected error from custom unmarshal")
- }
-}
-
-// Regression test; this caused a panic.
-func TestRepeatedEnum(t *testing.T) {
- pb := new(RepeatedEnum)
- if err := UnmarshalText("color: RED", pb); err != nil {
- t.Fatal(err)
- }
- exp := &RepeatedEnum{
- Color: []RepeatedEnum_Color{RepeatedEnum_RED},
- }
- if !Equal(pb, exp) {
- t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
- }
-}
-
-func TestProto3TextParsing(t *testing.T) {
- m := new(proto3pb.Message)
- const in = `name: "Wallace" true_scotsman: true`
- want := &proto3pb.Message{
- Name: "Wallace",
- TrueScotsman: true,
- }
- if err := UnmarshalText(in, m); err != nil {
- t.Fatal(err)
- }
- if !Equal(m, want) {
- t.Errorf("\n got %v\nwant %v", m, want)
- }
-}
-
-func TestMapParsing(t *testing.T) {
- m := new(MessageWithMap)
- const in = `name_mapping: name_mapping:` +
- `msg_mapping:>` +
- `msg_mapping>` + // no colon after "value"
- `byte_mapping:`
- want := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Beatles",
- 1234: "Feist",
- },
- MsgMapping: map[int64]*FloatingPoint{
- -4: {F: Float64(2.0)},
- -2: {F: Float64(4.0)},
- },
- ByteMapping: map[bool][]byte{
- true: []byte("so be it"),
- },
- }
- if err := UnmarshalText(in, m); err != nil {
- t.Fatal(err)
- }
- if !Equal(m, want) {
- t.Errorf("\n got %v\nwant %v", m, want)
- }
-}
-
-var benchInput string
-
-func init() {
- benchInput = "count: 4\n"
- for i := 0; i < 1000; i++ {
- benchInput += "pet: \"fido\"\n"
- }
-
- // Check it is valid input.
- pb := new(MyMessage)
- err := UnmarshalText(benchInput, pb)
- if err != nil {
- panic("Bad benchmark input: " + err.Error())
- }
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
- pb := new(MyMessage)
- for i := 0; i < b.N; i++ {
- UnmarshalText(benchInput, pb)
- }
- b.SetBytes(int64(len(benchInput)))
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
deleted file mode 100644
index 03e9a98ce9..0000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "bytes"
- "errors"
- "io/ioutil"
- "math"
- "strings"
- "testing"
-
- "github.com/golang/protobuf/proto"
-
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-// textMessage implements the methods that allow it to marshal and unmarshal
-// itself as text.
-type textMessage struct {
-}
-
-func (*textMessage) MarshalText() ([]byte, error) {
- return []byte("custom"), nil
-}
-
-func (*textMessage) UnmarshalText(bytes []byte) error {
- if string(bytes) != "custom" {
- return errors.New("expected 'custom'")
- }
- return nil
-}
-
-func (*textMessage) Reset() {}
-func (*textMessage) String() string { return "" }
-func (*textMessage) ProtoMessage() {}
-
-func newTestMessage() *pb.MyMessage {
- msg := &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- Quote: proto.String(`"I didn't want to go."`),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &pb.InnerMessage{
- Host: proto.String("footrest.syd"),
- Port: proto.Int32(7001),
- Connected: proto.Bool(true),
- },
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(0xdeadbeef),
- Value: []byte{1, 65, 7, 12},
- },
- {
- Weight: proto.Float32(6.022),
- Inner: &pb.InnerMessage{
- Host: proto.String("lesha.mtv"),
- Port: proto.Int32(8002),
- },
- },
- },
- Bikeshed: pb.MyMessage_BLUE.Enum(),
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(8),
- },
- // One normally wouldn't do this.
- // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
- XXX_unrecognized: []byte{13<<3 | 0, 4},
- }
- ext := &pb.Ext{
- Data: proto.String("Big gobs for big rats"),
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
- panic(err)
- }
- greetings := []string{"adg", "easy", "cow"}
- if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
- panic(err)
- }
-
- // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
- b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
- if err != nil {
- panic(err)
- }
- b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
- proto.SetRawExtension(msg, 201, b)
-
- // Extensions can be plain fields, too, so let's test that.
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
- proto.SetRawExtension(msg, 202, b)
-
- return msg
-}
-
-const text = `count: 42
-name: "Dave"
-quote: "\"I didn't want to go.\""
-pet: "bunny"
-pet: "kitty"
-pet: "horsey"
-inner: <
- host: "footrest.syd"
- port: 7001
- connected: true
->
-others: <
- key: 3735928559
- value: "\001A\007\014"
->
-others: <
- weight: 6.022
- inner: <
- host: "lesha.mtv"
- port: 8002
- >
->
-bikeshed: BLUE
-SomeGroup {
- group_field: 8
-}
-/* 2 unknown bytes */
-13: 4
-[testdata.Ext.more]: <
- data: "Big gobs for big rats"
->
-[testdata.greeting]: "adg"
-[testdata.greeting]: "easy"
-[testdata.greeting]: "cow"
-/* 13 unknown bytes */
-201: "\t3G skiing"
-/* 3 unknown bytes */
-202: 19
-`
-
-func TestMarshalText(t *testing.T) {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, newTestMessage()); err != nil {
- t.Fatalf("proto.MarshalText: %v", err)
- }
- s := buf.String()
- if s != text {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
- }
-}
-
-func TestMarshalTextCustomMessage(t *testing.T) {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, &textMessage{}); err != nil {
- t.Fatalf("proto.MarshalText: %v", err)
- }
- s := buf.String()
- if s != "custom" {
- t.Errorf("Got %q, expected %q", s, "custom")
- }
-}
-func TestMarshalTextNil(t *testing.T) {
- want := ""
- tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
- for i, test := range tests {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, test); err != nil {
- t.Fatal(err)
- }
- if got := buf.String(); got != want {
- t.Errorf("%d: got %q want %q", i, got, want)
- }
- }
-}
-
-func TestMarshalTextUnknownEnum(t *testing.T) {
- // The Color enum only specifies values 0-2.
- m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
- got := m.String()
- const want = `bikeshed:3 `
- if got != want {
- t.Errorf("\n got %q\nwant %q", got, want)
- }
-}
-
-func BenchmarkMarshalTextBuffered(b *testing.B) {
- buf := new(bytes.Buffer)
- m := newTestMessage()
- for i := 0; i < b.N; i++ {
- buf.Reset()
- proto.MarshalText(buf, m)
- }
-}
-
-func BenchmarkMarshalTextUnbuffered(b *testing.B) {
- w := ioutil.Discard
- m := newTestMessage()
- for i := 0; i < b.N; i++ {
- proto.MarshalText(w, m)
- }
-}
-
-func compact(src string) string {
- // s/[ \n]+/ /g; s/ $//;
- dst := make([]byte, len(src))
- space, comment := false, false
- j := 0
- for i := 0; i < len(src); i++ {
- if strings.HasPrefix(src[i:], "/*") {
- comment = true
- i++
- continue
- }
- if comment && strings.HasPrefix(src[i:], "*/") {
- comment = false
- i++
- continue
- }
- if comment {
- continue
- }
- c := src[i]
- if c == ' ' || c == '\n' {
- space = true
- continue
- }
- if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
- space = false
- }
- if c == '{' {
- space = false
- }
- if space {
- dst[j] = ' '
- j++
- space = false
- }
- dst[j] = c
- j++
- }
- if space {
- dst[j] = ' '
- j++
- }
- return string(dst[0:j])
-}
-
-var compactText = compact(text)
-
-func TestCompactText(t *testing.T) {
- s := proto.CompactTextString(newTestMessage())
- if s != compactText {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
- }
-}
-
-func TestStringEscaping(t *testing.T) {
- testCases := []struct {
- in *pb.Strings
- out string
- }{
- {
- // Test data from C++ test (TextFormatTest.StringEscape).
- // Single divergence: we don't escape apostrophes.
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
- "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
- },
- {
- // Test data from the same C++ test.
- &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
- "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
- },
- {
- // Some UTF-8.
- &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
- `string_field: "\000\001\377\201"` + "\n",
- },
- }
-
- for i, tc := range testCases {
- var buf bytes.Buffer
- if err := proto.MarshalText(&buf, tc.in); err != nil {
- t.Errorf("proto.MarsalText: %v", err)
- continue
- }
- s := buf.String()
- if s != tc.out {
- t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
- continue
- }
-
- // Check round-trip.
- pb := new(pb.Strings)
- if err := proto.UnmarshalText(s, pb); err != nil {
- t.Errorf("#%d: UnmarshalText: %v", i, err)
- continue
- }
- if !proto.Equal(pb, tc.in) {
- t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
- }
- }
-}
-
-// A limitedWriter accepts some output before it fails.
-// This is a proxy for something like a nearly-full or imminently-failing disk,
-// or a network connection that is about to die.
-type limitedWriter struct {
- b bytes.Buffer
- limit int
-}
-
-var outOfSpace = errors.New("proto: insufficient space")
-
-func (w *limitedWriter) Write(p []byte) (n int, err error) {
- var avail = w.limit - w.b.Len()
- if avail <= 0 {
- return 0, outOfSpace
- }
- if len(p) <= avail {
- return w.b.Write(p)
- }
- n, _ = w.b.Write(p[:avail])
- return n, outOfSpace
-}
-
-func TestMarshalTextFailing(t *testing.T) {
- // Try lots of different sizes to exercise more error code-paths.
- for lim := 0; lim < len(text); lim++ {
- buf := new(limitedWriter)
- buf.limit = lim
- err := proto.MarshalText(buf, newTestMessage())
- // We expect a certain error, but also some partial results in the buffer.
- if err != outOfSpace {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
- }
- s := buf.b.String()
- x := text[:buf.limit]
- if s != x {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
- }
- }
-}
-
-func TestFloats(t *testing.T) {
- tests := []struct {
- f float64
- want string
- }{
- {0, "0"},
- {4.7, "4.7"},
- {math.Inf(1), "inf"},
- {math.Inf(-1), "-inf"},
- {math.NaN(), "nan"},
- }
- for _, test := range tests {
- msg := &pb.FloatingPoint{F: &test.f}
- got := strings.TrimSpace(msg.String())
- want := `f:` + test.want
- if got != want {
- t.Errorf("f=%f: got %q, want %q", test.f, got, want)
- }
- }
-}
-
-func TestRepeatedNilText(t *testing.T) {
- m := &pb.MessageList{
- Message: []*pb.MessageList_Message{
- nil,
- &pb.MessageList_Message{
- Name: proto.String("Horse"),
- },
- nil,
- },
- }
- want := `Message
-Message {
- name: "Horse"
-}
-Message
-`
- if s := proto.MarshalTextString(m); s != want {
- t.Errorf(" got: %s\nwant: %s", s, want)
- }
-}
-
-func TestProto3Text(t *testing.T) {
- tests := []struct {
- m proto.Message
- want string
- }{
- // zero message
- {&proto3pb.Message{}, ``},
- // zero message except for an empty byte slice
- {&proto3pb.Message{Data: []byte{}}, ``},
- // trivial case
- {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
- // empty map
- {&pb.MessageWithMap{}, ``},
- // non-empty map; current map format is the same as a repeated struct
- {
- &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
- `name_mapping:`,
- },
- }
- for _, test := range tests {
- got := strings.TrimSpace(test.m.String())
- if got != test.want {
- t.Errorf("\n got %s\nwant %s", got, test.want)
- }
- }
-}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go
index 66aff7cb4a..ef2f3e86fe 100644
--- a/Godeps/_workspace/src/golang.org/x/net/context/context.go
+++ b/Godeps/_workspace/src/golang.org/x/net/context/context.go
@@ -64,18 +64,21 @@ type Context interface {
//
// Done is provided for use in select statements:
//
- // // DoSomething calls DoSomethingSlow and returns as soon as
- // // it returns or ctx.Done is closed.
- // func DoSomething(ctx context.Context) (Result, error) {
- // c := make(chan Result, 1)
- // go func() { c <- DoSomethingSlow(ctx) }()
- // select {
- // case res := <-c:
- // return res, nil
- // case <-ctx.Done():
- // return nil, ctx.Err()
- // }
- // }
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out <-chan Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
@@ -202,6 +205,9 @@ type CancelFunc func()
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, &c)
@@ -262,6 +268,19 @@ func parentCancelCtx(parent Context) (*cancelCtx, bool) {
}
}
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
@@ -316,13 +335,7 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
c.mu.Unlock()
if removeFromParent {
- if p, ok := parentCancelCtx(c.Context); ok {
- p.mu.Lock()
- if p.children != nil {
- delete(p.children, c)
- }
- p.mu.Unlock()
- }
+ removeChild(c.Context, c)
}
}
@@ -333,9 +346,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
-// Canceling this context releases resources associated with the deadline
-// timer, so code should call cancel as soon as the operations running in this
-// Context complete.
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
@@ -380,7 +392,11 @@ func (c *timerCtx) String() string {
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
- c.cancelCtx.cancel(removeFromParent, err)
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
@@ -391,9 +407,8 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) {
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
-// Canceling this context releases resources associated with the deadline
-// timer, so code should call cancel as soon as the operations running in this
-// Context complete:
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
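
The doc-comment changes above generalize the old "deadline timer" wording: canceling any derived context, not just one with a deadline, releases the resources tied to it, which is also why removeChild was factored out and timerCtx.cancel now detaches itself from its parent explicitly. A small usage sketch of the recommended pattern, relying only on the package's public API:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
    )

    func slowOperation(ctx context.Context) error {
        select {
        case <-time.After(200 * time.Millisecond):
            return nil
        case <-ctx.Done():
            return ctx.Err() // e.g. context deadline exceeded
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        // Per the updated docs, call cancel as soon as the work completes so the
        // context's resources (timer, parent bookkeeping) are released promptly.
        defer cancel()

        if err := slowOperation(ctx); err != nil {
            fmt.Println("operation failed:", err)
        }
    }
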
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
deleted file mode 100644
index 82d2494a49..0000000000
--- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
+++ /dev/null
@@ -1,553 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context
-
-import (
- "fmt"
- "math/rand"
- "runtime"
- "strings"
- "sync"
- "testing"
- "time"
-)
-
-// otherContext is a Context that's not one of the types defined in context.go.
-// This lets us test code paths that differ based on the underlying type of the
-// Context.
-type otherContext struct {
- Context
-}
-
-func TestBackground(t *testing.T) {
- c := Background()
- if c == nil {
- t.Fatalf("Background returned nil")
- }
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- if got, want := fmt.Sprint(c), "context.Background"; got != want {
- t.Errorf("Background().String() = %q want %q", got, want)
- }
-}
-
-func TestTODO(t *testing.T) {
- c := TODO()
- if c == nil {
- t.Fatalf("TODO returned nil")
- }
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- if got, want := fmt.Sprint(c), "context.TODO"; got != want {
- t.Errorf("TODO().String() = %q want %q", got, want)
- }
-}
-
-func TestWithCancel(t *testing.T) {
- c1, cancel := WithCancel(Background())
-
- if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
- t.Errorf("c1.String() = %q want %q", got, want)
- }
-
- o := otherContext{c1}
- c2, _ := WithCancel(o)
- contexts := []Context{c1, o, c2}
-
- for i, c := range contexts {
- if d := c.Done(); d == nil {
- t.Errorf("c[%d].Done() == %v want non-nil", i, d)
- }
- if e := c.Err(); e != nil {
- t.Errorf("c[%d].Err() == %v want nil", i, e)
- }
-
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- }
-
- cancel()
- time.Sleep(100 * time.Millisecond) // let cancelation propagate
-
- for i, c := range contexts {
- select {
- case <-c.Done():
- default:
- t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
- }
- if e := c.Err(); e != Canceled {
- t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
- }
- }
-}
-
-func TestParentFinishesChild(t *testing.T) {
- // Context tree:
- // parent -> cancelChild
- // parent -> valueChild -> timerChild
- parent, cancel := WithCancel(Background())
- cancelChild, stop := WithCancel(parent)
- defer stop()
- valueChild := WithValue(parent, "key", "value")
- timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
- defer stop()
-
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- case x := <-cancelChild.Done():
- t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
- case x := <-timerChild.Done():
- t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
- case x := <-valueChild.Done():
- t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
- default:
- }
-
- // The parent's children should contain the two cancelable children.
- pc := parent.(*cancelCtx)
- cc := cancelChild.(*cancelCtx)
- tc := timerChild.(*timerCtx)
- pc.mu.Lock()
- if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
- t.Errorf("bad linkage: pc.children = %v, want %v and %v",
- pc.children, cc, tc)
- }
- pc.mu.Unlock()
-
- if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
- t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
- }
- if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
- t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
- }
-
- cancel()
-
- pc.mu.Lock()
- if len(pc.children) != 0 {
- t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
- }
- pc.mu.Unlock()
-
- // parent and children should all be finished.
- check := func(ctx Context, name string) {
- select {
- case <-ctx.Done():
- default:
- t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
- }
- if e := ctx.Err(); e != Canceled {
- t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
- }
- }
- check(parent, "parent")
- check(cancelChild, "cancelChild")
- check(valueChild, "valueChild")
- check(timerChild, "timerChild")
-
- // WithCancel should return a canceled context on a canceled parent.
- precanceledChild := WithValue(parent, "key", "value")
- select {
- case <-precanceledChild.Done():
- default:
- t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
- }
- if e := precanceledChild.Err(); e != Canceled {
- t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
- }
-}
-
-func TestChildFinishesFirst(t *testing.T) {
- cancelable, stop := WithCancel(Background())
- defer stop()
- for _, parent := range []Context{Background(), cancelable} {
- child, cancel := WithCancel(parent)
-
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- case x := <-child.Done():
- t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
- default:
- }
-
- cc := child.(*cancelCtx)
- pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
- if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
- t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
- }
-
- if pcok {
- pc.mu.Lock()
- if len(pc.children) != 1 || !pc.children[cc] {
- t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
- }
- pc.mu.Unlock()
- }
-
- cancel()
-
- if pcok {
- pc.mu.Lock()
- if len(pc.children) != 0 {
- t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
- }
- pc.mu.Unlock()
- }
-
- // child should be finished.
- select {
- case <-child.Done():
- default:
- t.Errorf("<-child.Done() blocked, but shouldn't have")
- }
- if e := child.Err(); e != Canceled {
- t.Errorf("child.Err() == %v want %v", e, Canceled)
- }
-
- // parent should not be finished.
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- default:
- }
- if e := parent.Err(); e != nil {
- t.Errorf("parent.Err() == %v want nil", e)
- }
- }
-}
-
-func testDeadline(c Context, wait time.Duration, t *testing.T) {
- select {
- case <-time.After(wait):
- t.Fatalf("context should have timed out")
- case <-c.Done():
- }
- if e := c.Err(); e != DeadlineExceeded {
- t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
- }
-}
-
-func TestDeadline(t *testing.T) {
- c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
- if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
- t.Errorf("c.String() = %q want prefix %q", got, prefix)
- }
- testDeadline(c, 200*time.Millisecond, t)
-
- c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
- o := otherContext{c}
- testDeadline(o, 200*time.Millisecond, t)
-
- c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
- o = otherContext{c}
- c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
- testDeadline(c, 200*time.Millisecond, t)
-}
-
-func TestTimeout(t *testing.T) {
- c, _ := WithTimeout(Background(), 100*time.Millisecond)
- if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
- t.Errorf("c.String() = %q want prefix %q", got, prefix)
- }
- testDeadline(c, 200*time.Millisecond, t)
-
- c, _ = WithTimeout(Background(), 100*time.Millisecond)
- o := otherContext{c}
- testDeadline(o, 200*time.Millisecond, t)
-
- c, _ = WithTimeout(Background(), 100*time.Millisecond)
- o = otherContext{c}
- c, _ = WithTimeout(o, 300*time.Millisecond)
- testDeadline(c, 200*time.Millisecond, t)
-}
-
-func TestCanceledTimeout(t *testing.T) {
- c, _ := WithTimeout(Background(), 200*time.Millisecond)
- o := otherContext{c}
- c, cancel := WithTimeout(o, 400*time.Millisecond)
- cancel()
- time.Sleep(100 * time.Millisecond) // let cancelation propagate
- select {
- case <-c.Done():
- default:
- t.Errorf("<-c.Done() blocked, but shouldn't have")
- }
- if e := c.Err(); e != Canceled {
- t.Errorf("c.Err() == %v want %v", e, Canceled)
- }
-}
-
-type key1 int
-type key2 int
-
-var k1 = key1(1)
-var k2 = key2(1) // same int as k1, different type
-var k3 = key2(3) // same type as k2, different int
-
-func TestValues(t *testing.T) {
- check := func(c Context, nm, v1, v2, v3 string) {
- if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
- t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
- }
- if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
- t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
- }
- if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
- t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
- }
- }
-
- c0 := Background()
- check(c0, "c0", "", "", "")
-
- c1 := WithValue(Background(), k1, "c1k1")
- check(c1, "c1", "c1k1", "", "")
-
- if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
- t.Errorf("c.String() = %q want %q", got, want)
- }
-
- c2 := WithValue(c1, k2, "c2k2")
- check(c2, "c2", "c1k1", "c2k2", "")
-
- c3 := WithValue(c2, k3, "c3k3")
- check(c3, "c2", "c1k1", "c2k2", "c3k3")
-
- c4 := WithValue(c3, k1, nil)
- check(c4, "c4", "", "c2k2", "c3k3")
-
- o0 := otherContext{Background()}
- check(o0, "o0", "", "", "")
-
- o1 := otherContext{WithValue(Background(), k1, "c1k1")}
- check(o1, "o1", "c1k1", "", "")
-
- o2 := WithValue(o1, k2, "o2k2")
- check(o2, "o2", "c1k1", "o2k2", "")
-
- o3 := otherContext{c4}
- check(o3, "o3", "", "c2k2", "c3k3")
-
- o4 := WithValue(o3, k3, nil)
- check(o4, "o4", "", "c2k2", "")
-}
-
-func TestAllocs(t *testing.T) {
- bg := Background()
- for _, test := range []struct {
- desc string
- f func()
- limit float64
- gccgoLimit float64
- }{
- {
- desc: "Background()",
- f: func() { Background() },
- limit: 0,
- gccgoLimit: 0,
- },
- {
- desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
- f: func() {
- c := WithValue(bg, k1, nil)
- c.Value(k1)
- },
- limit: 3,
- gccgoLimit: 3,
- },
- {
- desc: "WithTimeout(bg, 15*time.Millisecond)",
- f: func() {
- c, _ := WithTimeout(bg, 15*time.Millisecond)
- <-c.Done()
- },
- limit: 8,
- gccgoLimit: 13,
- },
- {
- desc: "WithCancel(bg)",
- f: func() {
- c, cancel := WithCancel(bg)
- cancel()
- <-c.Done()
- },
- limit: 5,
- gccgoLimit: 8,
- },
- {
- desc: "WithTimeout(bg, 100*time.Millisecond)",
- f: func() {
- c, cancel := WithTimeout(bg, 100*time.Millisecond)
- cancel()
- <-c.Done()
- },
- limit: 8,
- gccgoLimit: 25,
- },
- } {
- limit := test.limit
- if runtime.Compiler == "gccgo" {
- // gccgo does not yet do escape analysis.
- // TOOD(iant): Remove this when gccgo does do escape analysis.
- limit = test.gccgoLimit
- }
- if n := testing.AllocsPerRun(100, test.f); n > limit {
- t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
- }
- }
-}
-
-func TestSimultaneousCancels(t *testing.T) {
- root, cancel := WithCancel(Background())
- m := map[Context]CancelFunc{root: cancel}
- q := []Context{root}
- // Create a tree of contexts.
- for len(q) != 0 && len(m) < 100 {
- parent := q[0]
- q = q[1:]
- for i := 0; i < 4; i++ {
- ctx, cancel := WithCancel(parent)
- m[ctx] = cancel
- q = append(q, ctx)
- }
- }
- // Start all the cancels in a random order.
- var wg sync.WaitGroup
- wg.Add(len(m))
- for _, cancel := range m {
- go func(cancel CancelFunc) {
- cancel()
- wg.Done()
- }(cancel)
- }
- // Wait on all the contexts in a random order.
- for ctx := range m {
- select {
- case <-ctx.Done():
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
- }
- }
- // Wait for all the cancel functions to return.
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
- select {
- case <-done:
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
- }
-}
-
-func TestInterlockedCancels(t *testing.T) {
- parent, cancelParent := WithCancel(Background())
- child, cancelChild := WithCancel(parent)
- go func() {
- parent.Done()
- cancelChild()
- }()
- cancelParent()
- select {
- case <-child.Done():
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
- }
-}
-
-func TestLayersCancel(t *testing.T) {
- testLayers(t, time.Now().UnixNano(), false)
-}
-
-func TestLayersTimeout(t *testing.T) {
- testLayers(t, time.Now().UnixNano(), true)
-}
-
-func testLayers(t *testing.T, seed int64, testTimeout bool) {
- rand.Seed(seed)
- errorf := func(format string, a ...interface{}) {
- t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
- }
- const (
- timeout = 200 * time.Millisecond
- minLayers = 30
- )
- type value int
- var (
- vals []*value
- cancels []CancelFunc
- numTimers int
- ctx = Background()
- )
- for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
- switch rand.Intn(3) {
- case 0:
- v := new(value)
- ctx = WithValue(ctx, v, v)
- vals = append(vals, v)
- case 1:
- var cancel CancelFunc
- ctx, cancel = WithCancel(ctx)
- cancels = append(cancels, cancel)
- case 2:
- var cancel CancelFunc
- ctx, cancel = WithTimeout(ctx, timeout)
- cancels = append(cancels, cancel)
- numTimers++
- }
- }
- checkValues := func(when string) {
- for _, key := range vals {
- if val := ctx.Value(key).(*value); key != val {
- errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
- }
- }
- }
- select {
- case <-ctx.Done():
- errorf("ctx should not be canceled yet")
- default:
- }
- if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
- t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
- }
- t.Log(ctx)
- checkValues("before cancel")
- if testTimeout {
- select {
- case <-ctx.Done():
- case <-time.After(timeout + timeout/10):
- errorf("ctx should have timed out")
- }
- checkValues("after timeout")
- } else {
- cancel := cancels[rand.Intn(len(cancels))]
- cancel()
- select {
- case <-ctx.Done():
- default:
- errorf("ctx should be canceled")
- }
- checkValues("after cancel")
- }
-}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go
new file mode 100644
index 0000000000..48610e3627
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package ctxhttp
+
+import "net/http"
+
+func canceler(client *http.Client, req *http.Request) func() {
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
new file mode 100644
index 0000000000..56bcbadb85
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package ctxhttp
+
+import "net/http"
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+func canceler(client *http.Client, req *http.Request) func() {
+ rc, ok := client.Transport.(requestCanceler)
+ if !ok {
+ return func() {}
+ }
+ return func() {
+ rc.CancelRequest(req)
+ }
+}
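
The two new cancelreq files select the request-cancelation mechanism at build time: on Go 1.5 and later the helper closes the request's Cancel channel, while older toolchains fall back to Transport.CancelRequest when the transport supports it. A rough sketch of the Go 1.5 mechanism in isolation (the URL and delay are placeholders):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        req, err := http.NewRequest("GET", "https://example.com/", nil)
        if err != nil {
            panic(err)
        }

        // Go 1.5+ style, as in cancelreq.go: closing req.Cancel aborts the
        // request if it is still in flight.
        ch := make(chan struct{})
        req.Cancel = ch
        go func() {
            time.Sleep(50 * time.Millisecond)
            close(ch)
        }()

        if resp, err := http.DefaultClient.Do(req); err != nil {
            fmt.Println("request aborted:", err)
        } else {
            resp.Body.Close()
            fmt.Println("response arrived before cancelation:", resp.Status)
        }
    }
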
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000000..9f34888132
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
+ cancel := canceler(client, req)
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := client.Do(req)
+ result <- responseAndError{resp, err}
+ }()
+
+ select {
+ case <-ctx.Done():
+ cancel()
+ return nil, ctx.Err()
+ case r := <-result:
+ return r.resp, r.err
+ }
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
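
Taken together, ctxhttp.Do runs the request on its own goroutine and races it against ctx.Done(), canceling the request and returning ctx.Err() if the context wins. A minimal usage sketch of the convenience wrappers (URL and timeout are placeholders):

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
        "golang.org/x/net/context/ctxhttp"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        // A nil client means http.DefaultClient; if the context expires first,
        // the returned error is ctx.Err() rather than a transport error.
        resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }
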
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
deleted file mode 100644
index a6754dc368..0000000000
--- a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context_test
-
-import (
- "fmt"
- "time"
-
- "golang.org/x/net/context"
-)
-
-func ExampleWithTimeout() {
- // Pass a context with a timeout to tell a blocking function that it
- // should abandon its work after the timeout elapses.
- ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
- select {
- case <-time.After(200 * time.Millisecond):
- fmt.Println("overslept")
- case <-ctx.Done():
- fmt.Println(ctx.Err()) // prints "context deadline exceeded"
- }
- // Output:
- // context deadline exceeded
-}
diff --git a/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore b/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore
new file mode 100644
index 0000000000..190f12234a
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile b/Godeps/_workspace/src/golang.org/x/net/http2/Dockerfile
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile
rename to Godeps/_workspace/src/golang.org/x/net/http2/Dockerfile
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/Makefile b/Godeps/_workspace/src/golang.org/x/net/http2/Makefile
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/Makefile
rename to Godeps/_workspace/src/golang.org/x/net/http2/Makefile
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/README b/Godeps/_workspace/src/golang.org/x/net/http2/README
similarity index 68%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/README
rename to Godeps/_workspace/src/golang.org/x/net/http2/README
index 16cb516a69..360d5aa379 100644
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/README
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/README
@@ -10,8 +10,11 @@ Status:
* The client work has just started but shares a lot of code
is coming along much quicker.
-Docs are at https://godoc.org/github.com/bradfitz/http2
+Docs are at https://godoc.org/golang.org/x/net/http2
Demo test server at https://http2.golang.org/
-Help & bug reports welcome.
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go b/Godeps/_workspace/src/golang.org/x/net/http2/buffer.go
similarity index 89%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/buffer.go
index 48d7561ecf..c43954cf04 100644
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/buffer.go
@@ -19,8 +19,9 @@ type buffer struct {
}
var (
- errReadEmpty = errors.New("read from empty buffer")
- errWriteFull = errors.New("write on full buffer")
+ errReadEmpty = errors.New("read from empty buffer")
+ errWriteClosed = errors.New("write on closed buffer")
+ errWriteFull = errors.New("write on full buffer")
)
// Read copies bytes from the buffer into p.
@@ -45,7 +46,7 @@ func (b *buffer) Len() int {
// It is an error to write more data than the buffer can hold.
func (b *buffer) Write(p []byte) (n int, err error) {
if b.closed {
- return 0, errors.New("closed")
+ return 0, errWriteClosed
}
// Slide existing data to beginning.
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/errors.go b/Godeps/_workspace/src/golang.org/x/net/http2/errors.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/errors.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/errors.go
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/flow.go b/Godeps/_workspace/src/golang.org/x/net/http2/flow.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/flow.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/flow.go
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go b/Godeps/_workspace/src/golang.org/x/net/http2/frame.go
similarity index 99%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/frame.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/frame.go
index 94c5e44ae5..e8b872a19b 100644
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/frame.go
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/frame.go
@@ -85,8 +85,8 @@ const (
// Continuation Frame
FlagContinuationEndHeaders Flags = 0x4
- FlagPushPromiseEndHeaders = 0x4
- FlagPushPromisePadded = 0x8
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
)
var flagName = map[FrameType]map[Flags]string{
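
Giving the push-promise constants an explicit Flags type lets them compose with other flag values and flow into Flags-typed contexts without conversions. A quick illustration, assuming the package's Flags.Has helper (defined elsewhere in frame.go):

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        // Previously these two constants were untyped; now the expression below
        // is an http2.Flags value without any explicit conversion.
        f := http2.FlagPushPromiseEndHeaders | http2.FlagPushPromisePadded
        fmt.Println(f.Has(http2.FlagPushPromisePadded)) // true
    }
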
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go b/Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go
similarity index 96%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go
index 6e1a3823ca..7dc2ef90db 100644
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/gotrack.go
@@ -14,16 +14,20 @@ import (
"bytes"
"errors"
"fmt"
+ "os"
"runtime"
"strconv"
"sync"
)
-var DebugGoroutines = false
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
return goroutineLock(curGoroutineID())
}
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore
rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/.gitignore
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/Makefile
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile
rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/Makefile
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/README
similarity index 100%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README
rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/README
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go
similarity index 93%
rename from Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go
rename to Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go
index de6caaa749..50480449d9 100644
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go
+++ b/Godeps/_workspace/src/golang.org/x/net/http2/h2demo/h2demo.go
@@ -31,7 +31,7 @@ import (
"camlistore.org/pkg/googlestorage"
"camlistore.org/pkg/singleflight"
- "github.com/bradfitz/http2"
+ "golang.org/x/net/http2"
)
var (
@@ -51,7 +51,7 @@ func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
Use Firefox Nightly or go to about:config and enable "network.http.spdy.enabled.http2draft"
Use Google Chrome Canary and/or go to chrome://flags/#enable-spdy4 to Enable SPDY/4 (Chrome's name for HTTP/2)
This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.
The code is currently at <a
-href="https://github.com/bradfitz/http2">github.com/bradfitz/http2</a>
+href="https://golang.org/x/net/http2">golang.org/x/net/http2</a>
but will move to the Go standard library at some point in the future
(enabled by default, without users needing to change their code).
+{{end}}
+
+
+`
diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000000..bb42aa5320
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+ sum int64 // running total of measurements
+ sumOfSquares float64 // running total of squared measurements
+ buckets []int64 // bucketed values for histogram
+ value int // holds a single value as an optimization
+ valueCount int64 // number of values recorded for single value
+}
+
+// AddMeasurement records a value measurement observation to the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+ n += 1
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
+
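The power-of-two bucketing is easiest to see with concrete values. A minimal standalone sketch (not part of this patch) that duplicates the log2 and getBucket helpers above under the same bucketCount of 38:

package main

import "fmt"

const bucketCount = 38

// Copies of the helpers above: bucket 0 holds values 0-1; bucket i (i > 0)
// holds values in [2^i, 2^(i+1)).
func log2(i int64) int {
    n := 0
    for ; i >= 0x100; i >>= 8 {
        n += 8
    }
    for ; i > 0; i >>= 1 {
        n++
    }
    return n
}

func getBucket(i int64) int {
    index := log2(i) - 1
    if index < 0 {
        index = 0
    }
    if index >= bucketCount {
        index = bucketCount - 1
    }
    return index
}

func main() {
    for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
        fmt.Printf("value %4d -> bucket %d\n", v, getBucket(v))
    }
    // value    0 -> bucket 0
    // value    1 -> bucket 0
    // value    2 -> bucket 1
    // value    3 -> bucket 1
    // value    4 -> bucket 2
    // value    7 -> bucket 2
    // value    8 -> bucket 3
    // value 1000 -> bucket 9
}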
+// Total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+ total += int64(val)
+ }
+ return
+}
+
+// Average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// Variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// StandardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// PercentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// Median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
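To see the percentile interpolation in action: histogram is unexported, so the following sketch only compiles inside package trace (for example as a throwaway _test.go file); the values and the expected output were worked through by hand against the code above.

package trace

import "fmt"

// Illustrative only: this uses the unexported histogram type directly.
func ExampleHistogramMedian() {
    h := new(histogram)
    for _, v := range []int64{1, 2, 3, 100} {
        h.addMeasurement(v)
    }
    // 1 lands in bucket 0, 2 and 3 in bucket 1 (values 2-3), and 100 in
    // bucket 6 (values 64-127); the 50th percentile is interpolated
    // inside bucket 1.
    fmt.Println(h.median())
    // Output: 3
}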
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl.Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`))
diff --git a/Godeps/_workspace/src/golang.org/x/net/trace/trace.go b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000000..c87290b76e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1057 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ switch {
+ case err != nil: // Badly formed address; fail closed.
+ return false, false
+ case host == "localhost" || host == "127.0.0.1" || host == "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
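A program that wants different access rules replaces AuthRequest before serving traffic. A minimal sketch, assuming a hypothetical trusted 10.0.0.0/8 network and that the package is imported by its canonical path golang.org/x/net/trace:

package main

import (
    "log"
    "net"
    "net/http"

    "golang.org/x/net/trace"
)

func main() {
    // Hypothetical trusted range; adjust for the actual deployment.
    _, trusted, err := net.ParseCIDR("10.0.0.0/8")
    if err != nil {
        log.Fatal(err)
    }

    trace.AuthRequest = func(req *http.Request) (bool, bool) {
        host, _, err := net.SplitHostPort(req.RemoteAddr)
        if err != nil {
            return false, false // badly formed address; fail closed
        }
        ip := net.ParseIP(host)
        // Viewable from the trusted range, but never show sensitive events.
        return ip != nil && trusted.Contains(ip), false
    }

    // /debug/requests and /debug/events were registered on
    // http.DefaultServeMux by this package's init function.
    log.Fatal(http.ListenAndServe(":8080", nil))
}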
+
+func init() {
+ http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ Render(w, req, sensitive)
+ })
+ http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ RenderEvents(w, req, sensitive)
+ })
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam, _ := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
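Because Render does no auth checking itself, it can also be mounted on a private mux when the default /debug/requests handler is not wanted. A minimal sketch with a hypothetical /internal/requests path:

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/trace"
)

func main() {
    mux := http.NewServeMux()
    // Render does no auth checking, so access control is this handler's job;
    // binding to loopback only is the crude control used in this sketch.
    mux.HandleFunc("/internal/requests", func(w http.ResponseWriter, req *http.Request) {
        trace.Render(w, req, true /* show sensitive events */)
    })
    log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
}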
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
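NewContext and FromContext let a handler pass its Trace to helpers without threading it explicitly. A minimal sketch (handler and doWork are made-up names; the vendored golang.org/x/net/context package provides the Context type used here):

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/context"
    "golang.org/x/net/trace"
)

func handler(w http.ResponseWriter, req *http.Request) {
    tr := trace.New("mypkg.Handler", req.URL.Path)
    defer tr.Finish()

    ctx := trace.NewContext(context.Background(), tr)
    doWork(ctx)
}

func doWork(ctx context.Context) {
    // Annotate the caller's trace if one is attached; otherwise do nothing.
    if tr, ok := trace.FromContext(ctx); ok {
        tr.LazyPrintf("doWork: starting")
    }
}

func main() {
    http.HandleFunc("/work", handler)
    log.Fatal(http.ListenAndServe("localhost:8080", nil))
}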
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
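SetRecycler matters mostly when LazyLog is fed pooled objects: the recycler receives each logged value once the trace no longer references it. A minimal sketch with a hypothetical buffer pool:

package main

import (
    "bytes"
    "fmt"
    "log"
    "net/http"
    "sync"

    "golang.org/x/net/trace"
)

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

func handler(w http.ResponseWriter, req *http.Request) {
    tr := trace.New("mypkg.Handler", req.URL.Path)
    defer tr.Finish()

    // Hand pooled buffers back once the trace has discarded them.
    tr.SetRecycler(func(x interface{}) {
        if b, ok := x.(*bytes.Buffer); ok {
            b.Reset()
            bufPool.Put(b)
        }
    })

    b := bufPool.Get().(*bytes.Buffer)
    fmt.Fprintf(b, "handling %s", req.URL.Path)
    tr.LazyLog(b, false) // *bytes.Buffer satisfies fmt.Stringer
}

func main() {
    http.HandleFunc("/", handler)
    log.Fatal(http.ListenAndServe("localhost:8080", nil))
}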
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.events = make([]event, 0, maxEventsPerTrace)
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+ tr.Elapsed = time.Now().Sub(tr.Start)
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ What interface{} // string or fmt.Stringer
+ Sensitive bool // whether this event contains sensitive information
+}
+
+// WhenString returns a string representation of the elapsed time of the event.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Timing information.
+ Start time.Time
+ Elapsed time.Duration // zero while active
+
+ // Trace information if non-zero.
+ traceID uint64
+ spanID uint64
+
+ // Whether this trace resulted in an error.
+ IsError bool
+
+ // Append-only sequence of events (modulo discards).
+ mu sync.RWMutex
+ events []event
+
+ refs int32 // how many buckets this is in
+ recycler func(interface{})
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.events = nil
+ tr.refs = 0
+ tr.recycler = nil
+ tr.disc = 0
+ tr.finishStack = nil
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a requestz.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < cap(tr.events) {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((cap(tr.events) - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to count for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[cap(tr.events)-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.events = make([]event, 0, m)
+ }
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ t := tr.Elapsed
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+<head>
+<title>/debug/requests</title>
+</head>
+<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+