compress (#33)

This commit is contained in:
Gyu-Ho Lee 2016-04-25 07:08:33 -07:00
parent cdcf92e16a
commit 39a07e6ab8
101 changed files with 14334 additions and 371 deletions

66
Godeps/Godeps.json generated

@@ -23,50 +23,59 @@
"Comment": "v1.0.2-4-g8808370",
"Rev": "8808370bf63524e115da1371ba42bce6739f3a6b"
},
{
"ImportPath": "github.com/cockroachdb/c-snappy",
"Rev": "5c6d0932e0adaffce4bfca7bdf2ac37f79952ccf"
},
{
"ImportPath": "github.com/coreos/etcd/auth/authpb",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/clientv3",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/compress",
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/coreos/etcd/storage/storagepb",
"Comment": "v2.3.0-534-gf8673b5",
"Rev": "f8673b5f60477fa2123a1195267938b21c102fb0"
"Comment": "v2.3.0-541-g1c455b9",
"Rev": "1c455b9da65ee5bf4cd3aa9f2e1673c3dfbcb654"
},
{
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@@ -88,18 +97,18 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.1-118-ge8904f5",
"Rev": "e8904f58e872a473a5b91bc9bf3377d223555263"
"Comment": "v0.2-11-g4f262e4",
"Rev": "4f262e4b0f3a6cea646e15798109335551e21756"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.1-118-ge8904f5",
"Rev": "e8904f58e872a473a5b91bc9bf3377d223555263"
"Comment": "v0.2-11-g4f262e4",
"Rev": "4f262e4b0f3a6cea646e15798109335551e21756"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.1-118-ge8904f5",
"Rev": "e8904f58e872a473a5b91bc9bf3377d223555263"
"Comment": "v0.2-11-g4f262e4",
"Rev": "4f262e4b0f3a6cea646e15798109335551e21756"
},
{
"ImportPath": "github.com/golang/freetype",
@@ -120,6 +129,10 @@
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "ec642410cd033af63620b66a91ccbd3c69c2c59a"
},
{
"ImportPath": "github.com/gonum/floats",
"Rev": "856ee8119ad91596c5394fa0a5c0470b6f16e676"
@@ -256,6 +269,11 @@
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
},
{
"ImportPath": "github.com/youtube/vitess/go/cgzip",
"Comment": "v2.0.0-beta.2-23-gfb1f128",
"Rev": "fb1f128bd85463f6d5512105d9a8ac0a688bf24e"
},
{
"ImportPath": "golang.org/x/image/draw",
"Rev": "f551d3a6b7fc11df315ad9e18b404280680f8bec"


@@ -243,6 +243,11 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
"--initial-cluster", clusterStr,
"--initial-cluster-state", "new",
}
if t.req.EtcdCompression != "" {
flags = append(flags,
"--experimental-compression", t.req.EtcdCompression,
)
}
flagString := strings.Join(flags, " ")
cmd := exec.Command(etcdBinaryPath, flags...)
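
A value the etcd binary does not recognize only surfaces as a startup failure; a small guard could reject it earlier. A minimal sketch, assuming the value set documented on Config.EtcdCompression in config.go; the helper name isValidCompression is hypothetical and not part of this commit:

	// isValidCompression reports whether v names a supported compression
	// algorithm (illustrative helper only).
	func isValidCompression(v string) bool {
		switch v {
		case "", "cgzip", "cgzip-lv2", "gzip", "snappy", "snappy-cpp":
			return true
		}
		return false
	}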


@ -112,6 +112,8 @@ type Request struct {
ZookeeperMaxClientCnxns int64 `protobuf:"varint,11,opt,name=zookeeperMaxClientCnxns,proto3" json:"zookeeperMaxClientCnxns,omitempty"`
// ZookeeperSnapCount is 100,000 by default.
ZookeeperSnapCount int64 `protobuf:"varint,12,opt,name=zookeeperSnapCount,proto3" json:"zookeeperSnapCount,omitempty"`
// EtcdCompression specifies the etcd compression algorithm.
EtcdCompression string `protobuf:"bytes,13,opt,name=etcdCompression,proto3" json:"etcdCompression,omitempty"`
}
func (m *Request) Reset() { *m = Request{} }
@@ -277,6 +279,12 @@ func (m *Request) MarshalTo(data []byte) (int, error) {
i++
i = encodeVarintMessage(data, i, uint64(m.ZookeeperSnapCount))
}
if len(m.EtcdCompression) > 0 {
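// Key for field 13, wire type 2 (length-delimited): 13<<3 | 2 = 0x6a.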
data[i] = 0x6a
i++
i = encodeVarintMessage(data, i, uint64(len(m.EtcdCompression)))
i += copy(data[i:], m.EtcdCompression)
}
return i, nil
}
@@ -380,6 +388,10 @@ func (m *Request) Size() (n int) {
if m.ZookeeperSnapCount != 0 {
n += 1 + sovMessage(uint64(m.ZookeeperSnapCount))
}
l = len(m.EtcdCompression)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
return n
}
@@ -722,6 +734,35 @@ func (m *Request) Unmarshal(data []byte) error {
break
}
}
case 13:
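// EtcdCompression (field 13): decode the varint length, then read that many string bytes.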
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EtcdCompression", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMessage
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EtcdCompression = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMessage(data[iNdEx:])
@@ -919,36 +960,38 @@
)
var fileDescriptorMessage = []byte{
// 496 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
0x14, 0x6f, 0xd7, 0xb5, 0x4d, 0x5e, 0xd7, 0x51, 0x59, 0xc0, 0xac, 0x0a, 0x55, 0x53, 0xb4, 0x03,
0x97, 0x66, 0x52, 0x8b, 0x10, 0x07, 0xb8, 0xb4, 0x15, 0xd2, 0x34, 0x01, 0x53, 0xc2, 0x89, 0x9b,
0x93, 0xbe, 0x85, 0xb0, 0xd6, 0x0e, 0x8e, 0x33, 0xb5, 0x7c, 0x12, 0x3e, 0xd2, 0x8e, 0xfb, 0x08,
0xfc, 0xf9, 0x22, 0xbc, 0xb9, 0x6b, 0xb6, 0xb2, 0x0d, 0x0e, 0x91, 0xde, 0xef, 0xdf, 0xb3, 0x1d,
0x3f, 0x43, 0x7b, 0x8e, 0x79, 0x2e, 0x12, 0xf4, 0x33, 0xad, 0x8c, 0x62, 0x75, 0x2a, 0xa5, 0xe9,
0xf6, 0x93, 0xd4, 0x7c, 0x2e, 0x22, 0x3f, 0x56, 0xf3, 0xc3, 0x44, 0x25, 0xea, 0xd0, 0xaa, 0x51,
0x71, 0x6a, 0x91, 0x05, 0xb6, 0x5a, 0xa5, 0xbc, 0xcb, 0x3a, 0x34, 0x03, 0xfc, 0x5a, 0x60, 0x6e,
0xd8, 0x4b, 0x70, 0x55, 0x86, 0x5a, 0x98, 0x54, 0x49, 0x5e, 0xdd, 0xaf, 0x3e, 0xdf, 0x1d, 0x70,
0xdf, 0x76, 0xf5, 0xaf, 0x2d, 0xfe, 0x87, 0xb5, 0x1e, 0xdc, 0x58, 0xd9, 0x10, 0x9c, 0xa9, 0x30,
0x22, 0x12, 0x39, 0xf2, 0x2d, 0x1b, 0xdb, 0xfb, 0x2b, 0x36, 0xb9, 0x96, 0x83, 0xd2, 0xc8, 0x3c,
0xd8, 0xc9, 0x10, 0xf5, 0xd1, 0x49, 0x68, 0x74, 0x2a, 0x13, 0x5e, 0xa3, 0xa0, 0x1b, 0x6c, 0x70,
0x6c, 0x1f, 0x5a, 0x39, 0xea, 0x73, 0x62, 0xe4, 0x14, 0x17, 0x7c, 0x9b, 0x2c, 0xed, 0xe0, 0x36,
0xc5, 0xba, 0xe0, 0x18, 0x5a, 0xe0, 0xbd, 0x98, 0x23, 0xaf, 0xdb, 0x0e, 0x25, 0xa6, 0xe3, 0x3c,
0x4d, 0x94, 0x4a, 0x66, 0x38, 0x9e, 0xa9, 0x62, 0x7a, 0xa2, 0xd5, 0x17, 0x8c, 0x57, 0xce, 0x86,
0x75, 0x3e, 0xa0, 0xb2, 0x17, 0xf0, 0xe4, 0x96, 0x12, 0x1a, 0xa5, 0xe9, 0x30, 0xc7, 0xb8, 0xe4,
0x4d, 0x1b, 0xbb, 0x5f, 0x64, 0x23, 0x78, 0x76, 0x57, 0x18, 0x15, 0xf1, 0x19, 0xae, 0xd6, 0x74,
0x6c, 0xf8, 0x9f, 0x1e, 0xf6, 0x16, 0x7a, 0x77, 0xf5, 0xb0, 0x88, 0x26, 0xa9, 0xa6, 0xcd, 0x29,
0xbd, 0xe4, 0xae, 0xed, 0xf2, 0x1f, 0x17, 0x3b, 0x80, 0xf6, 0x37, 0xa5, 0xce, 0x10, 0xe9, 0x8a,
0xde, 0x2d, 0x8f, 0x26, 0x1c, 0xec, 0x9f, 0xdb, 0x24, 0xd9, 0x2b, 0xd8, 0xbb, 0x21, 0xc4, 0x62,
0x3c, 0x4b, 0xe9, 0xca, 0xc6, 0x72, 0x21, 0x73, 0xde, 0x22, 0x7f, 0x2d, 0x78, 0x48, 0x66, 0x3e,
0xb0, 0x52, 0x0a, 0xa5, 0xc8, 0xc6, 0xaa, 0x90, 0x86, 0xef, 0xd8, 0xd0, 0x3d, 0x8a, 0xd7, 0x07,
0xb7, 0x1c, 0x1c, 0xe6, 0x42, 0x3d, 0x34, 0x42, 0x9b, 0x4e, 0x85, 0x39, 0xb0, 0x4d, 0xdb, 0xcf,
0x3a, 0x55, 0xd6, 0xba, 0x9a, 0xc2, 0xdc, 0xd2, 0x5b, 0xde, 0x1b, 0x70, 0xd6, 0x03, 0xc3, 0x00,
0x1a, 0x68, 0xe2, 0xe9, 0xf9, 0x90, 0xec, 0xeb, 0x7a, 0x40, 0x81, 0x36, 0xb8, 0x9f, 0x94, 0x3a,
0xb6, 0x0b, 0x75, 0xb6, 0xae, 0xa4, 0xb1, 0x92, 0x79, 0x31, 0xeb, 0xd4, 0xbc, 0x03, 0x70, 0xa8,
0x57, 0x46, 0x10, 0x19, 0x87, 0x66, 0x5e, 0xc4, 0x31, 0x3d, 0x14, 0x3b, 0xd0, 0x4e, 0xb0, 0x86,
0x83, 0xd7, 0xd0, 0xfa, 0xa8, 0x85, 0x24, 0x9f, 0x36, 0xa8, 0x59, 0x1f, 0x1c, 0x0b, 0x4f, 0xa9,
0xde, 0xdd, 0x9c, 0xde, 0xee, 0xa3, 0x12, 0xaf, 0xba, 0x7a, 0x95, 0xd1, 0xe3, 0x8b, 0x9f, 0xbd,
0xca, 0xc5, 0xaf, 0x5e, 0xf5, 0x92, 0xbe, 0x1f, 0xf4, 0x7d, 0xff, 0xdd, 0xab, 0x44, 0x0d, 0xfb,
0xa6, 0x86, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x54, 0x5c, 0x2b, 0x9a, 0x03, 0x00, 0x00,
// 515 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0xcd, 0x6e, 0xda, 0x40,
0x10, 0xe6, 0x27, 0x80, 0x3d, 0x84, 0x04, 0xad, 0xda, 0x66, 0x85, 0x2a, 0x14, 0x59, 0x39, 0xe4,
0x82, 0x23, 0x41, 0x55, 0xf5, 0xd0, 0x5e, 0x30, 0xaa, 0x14, 0x45, 0x6d, 0x23, 0xd3, 0x53, 0x6f,
0xc6, 0x4c, 0x5c, 0x1a, 0xf0, 0xb8, 0xeb, 0x75, 0x04, 0x7d, 0x92, 0x3e, 0x52, 0x8e, 0x7d, 0x84,
0xfe, 0xbc, 0x45, 0x4f, 0x1d, 0x96, 0xe0, 0x84, 0xfc, 0x34, 0x07, 0x4b, 0x33, 0xdf, 0xcf, 0xec,
0x0e, 0xfb, 0x01, 0x8d, 0x19, 0xa6, 0x69, 0x10, 0xa1, 0x9b, 0x28, 0xd2, 0x24, 0x2a, 0x5c, 0xc6,
0xba, 0xd5, 0x89, 0x26, 0xfa, 0x73, 0x36, 0x72, 0x43, 0x9a, 0x1d, 0x45, 0x14, 0xd1, 0x91, 0x61,
0x47, 0xd9, 0x99, 0xe9, 0x4c, 0x63, 0xaa, 0x95, 0xcb, 0xf9, 0x5b, 0x81, 0x9a, 0x8f, 0x5f, 0x33,
0x4c, 0xb5, 0x78, 0x09, 0x36, 0x25, 0xa8, 0x02, 0x3d, 0xa1, 0x58, 0x16, 0xf7, 0x8b, 0x87, 0x3b,
0x5d, 0xe9, 0x9a, 0xa9, 0xee, 0x95, 0xc4, 0xfd, 0xb0, 0xe6, 0xfd, 0x6b, 0xa9, 0xe8, 0x81, 0x35,
0x0e, 0x74, 0x30, 0x0a, 0x52, 0x94, 0x25, 0x63, 0xdb, 0xbb, 0x65, 0x1b, 0x5c, 0xd1, 0x7e, 0x2e,
0x14, 0x0e, 0x6c, 0x27, 0x88, 0xea, 0xf8, 0x74, 0xa8, 0xd5, 0x24, 0x8e, 0x64, 0x99, 0x8d, 0xb6,
0xbf, 0x81, 0x89, 0x7d, 0xa8, 0xa7, 0xa8, 0x2e, 0x18, 0x89, 0xc7, 0x38, 0x97, 0x5b, 0x2c, 0x69,
0xf8, 0x37, 0x21, 0xd1, 0x02, 0x4b, 0xf3, 0x01, 0xef, 0x83, 0x19, 0xca, 0x8a, 0x99, 0x90, 0xf7,
0xbc, 0xce, 0xb3, 0x88, 0x28, 0x9a, 0xa2, 0x37, 0xa5, 0x6c, 0x7c, 0xaa, 0xe8, 0x0b, 0x86, 0x2b,
0x65, 0xd5, 0x28, 0x1f, 0x60, 0xc5, 0x0b, 0x78, 0x7a, 0x83, 0x19, 0x6a, 0x52, 0xbc, 0xcc, 0x09,
0x2e, 0x64, 0xcd, 0xd8, 0xee, 0x27, 0x45, 0x1f, 0x9e, 0xdf, 0x25, 0xfa, 0x59, 0x78, 0x8e, 0xab,
0x33, 0x2d, 0x63, 0xfe, 0xaf, 0x46, 0xbc, 0x85, 0xf6, 0x5d, 0x7e, 0x98, 0x8d, 0x06, 0x13, 0xc5,
0x97, 0x23, 0xb5, 0x90, 0xb6, 0x99, 0xf2, 0x88, 0x4a, 0x1c, 0x40, 0xe3, 0x1b, 0xd1, 0x39, 0x22,
0x3f, 0xd1, 0xbb, 0xc5, 0xf1, 0x40, 0x82, 0xf9, 0xe5, 0x36, 0x41, 0xf1, 0x0a, 0xf6, 0xae, 0x81,
0x60, 0xee, 0x4d, 0x27, 0xfc, 0x64, 0x5e, 0x3c, 0x8f, 0x53, 0x59, 0x67, 0x7d, 0xd9, 0x7f, 0x88,
0x16, 0x2e, 0x88, 0x9c, 0x1a, 0xc6, 0x41, 0xe2, 0x51, 0x16, 0x6b, 0xb9, 0x6d, 0x4c, 0xf7, 0x30,
0xe2, 0x10, 0x76, 0x51, 0x87, 0x63, 0x8f, 0x66, 0x89, 0xe2, 0xcc, 0x2e, 0xe3, 0xd5, 0x30, 0x8b,
0xdc, 0x86, 0x9d, 0x0e, 0xd8, 0x79, 0xc4, 0x84, 0x0d, 0x95, 0xa1, 0x0e, 0x94, 0x6e, 0x16, 0x84,
0x05, 0x5b, 0xbc, 0x68, 0xd2, 0x2c, 0x8a, 0xfa, 0x32, 0xaf, 0xa9, 0x81, 0x4b, 0xce, 0x1b, 0xb0,
0xd6, 0xd1, 0x12, 0x00, 0xd5, 0xe5, 0xb4, 0x8b, 0x1e, 0xcb, 0xd7, 0x75, 0x97, 0x0d, 0x0d, 0xb0,
0x3f, 0x11, 0x9d, 0x98, 0x2b, 0x35, 0x4b, 0x4b, 0xca, 0xa3, 0x38, 0xcd, 0xa6, 0xcd, 0xb2, 0x73,
0x00, 0x16, 0xcf, 0x4a, 0xb8, 0x45, 0x21, 0xa1, 0x96, 0x66, 0x61, 0xc8, 0xf7, 0x30, 0xd1, 0xb7,
0xfc, 0x75, 0xdb, 0x7d, 0x0d, 0xf5, 0x8f, 0x2a, 0x88, 0x59, 0xa7, 0x34, 0x2a, 0xd1, 0x01, 0xcb,
0xb4, 0x67, 0x5c, 0xef, 0x6c, 0xe6, 0xbc, 0xb5, 0x9b, 0xf7, 0xab, 0xa9, 0x4e, 0xa1, 0xff, 0xe4,
0xf2, 0x57, 0xbb, 0x70, 0xf9, 0xbb, 0x5d, 0xfc, 0xc1, 0xdf, 0x4f, 0xfe, 0xbe, 0xff, 0x69, 0x17,
0x46, 0x55, 0xf3, 0xef, 0xeb, 0xfd, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3f, 0xff, 0xae, 0x1e, 0xc4,
0x03, 0x00, 0x00,
}


@@ -59,6 +59,9 @@ message Request {
// ZookeeperSnapCount is 100,000 by default.
int64 zookeeperSnapCount = 12;
// EtcdCompression specifies the etcd compression algorithm.
string etcdCompression = 13;
}
message Response {
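
On the wire the new field is an ordinary proto3 string; callers set it through the generated Go struct. A minimal sketch:

	// Illustrative only: any of the values listed in config.go works here.
	req := &Request{EtcdCompression: "snappy"}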


@@ -1,89 +0,0 @@
titles:
- Write 1M keys, 1000-client, 8-byte key, 256-byte value (etcd v2)
step1:
- data_path_list:
- 2016041802/01-etcdv2-1-monitor.csv
- 2016041802/01-etcdv2-2-monitor.csv
- 2016041802/01-etcdv2-3-monitor.csv
data_benchmark_path: 2016041802/01-etcdv2-timeseries.csv
output_path: 2016041802/01-etcdv2-aggregated.csv
step2:
- data_list:
- path: 2016041802/01-etcdv2-aggregated.csv
name: etcd_v2
output_path: 2016041802/01-all-aggregated.csv
step3:
- data_path: 2016041802/01-all-aggregated.csv
plot_list:
- lines:
- column: avg_latency_ms_etcd_v2
legend: etcd v2
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- 2016041802/01-avg-latency-ms.svg
- 2016041802/01-avg-latency-ms.png
- lines:
- column: throughput_etcd_v2
legend: etcd v2
x_axis: Second
y_axis: Throughput
output_path_list:
- 2016041802/01-throughput.svg
- 2016041802/01-throughput.png
- lines:
- column: avg_cpu_etcd_v2
legend: etcd v2
x_axis: Second
y_axis: CPU
output_path_list:
- 2016041802/01-avg-cpu.svg
- 2016041802/01-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v2
legend: etcd v2
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- 2016041802/01-avg-memory.svg
- 2016041802/01-avg-memory.png
step4:
preface: |
- Google Cloud Compute Engine
- 8 vCPUs + 16GB Memory + 50GB SSD
- 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
- Ubuntu 15.10
- Go 1.6, 1.7
- Java 8
- Java(TM) SE Runtime Environment (build 1.8.0_74-b02)
- Java HotSpot(TM) 64-Bit Server VM (build 25.74-b02, mixed mode)
- Consul v0.6.4
- etcd v3 (master branch)
- Zookeeper v3.4.8
results:
- images:
- image_title: 2016041802/01-avg-latency-ms
image_path: https://storage.googleapis.com/dbtester-results/2016041802/01-avg-latency-ms.svg
image_type: remote
- image_title: 2016041802/01-throughput
image_path: https://storage.googleapis.com/dbtester-results/2016041802/01-throughput.svg
image_type: remote
- image_title: 2016041802/01-avg-cpu
image_path: https://storage.googleapis.com/dbtester-results/2016041802/01-avg-cpu.svg
image_type: remote
- image_title: 2016041802/01-avg-memory
image_path: https://storage.googleapis.com/dbtester-results/2016041802/01-avg-memory.svg
image_type: remote
output_path: 2016041802/README.md


@@ -0,0 +1,173 @@
titles:
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, no compression)
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, cgzip)
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, cgzip-lv)
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, gzip)
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, snappy)
- Write 2M keys, 1000-client, 100-conn, 8-byte key, 256-byte value (etcd v3, snappy-cpp)
step1:
- data_path_list:
- 2016042501/01-etcdv3-nocompression-1-monitor.csv
- 2016042501/01-etcdv3-nocompression-2-monitor.csv
- 2016042501/01-etcdv3-nocompression-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-nocompression-timeseries.csv
output_path: 2016042501/01-etcdv3-nocompression-aggregated.csv
- data_path_list:
- 2016042501/01-etcdv3-cgzip-1-monitor.csv
- 2016042501/01-etcdv3-cgzip-2-monitor.csv
- 2016042501/01-etcdv3-cgzip-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-cgzip-timeseries.csv
output_path: 2016042501/01-etcdv3-cgzip-aggregated.csv
- data_path_list:
- 2016042501/01-etcdv3-cgzip-lv2-1-monitor.csv
- 2016042501/01-etcdv3-cgzip-lv2-2-monitor.csv
- 2016042501/01-etcdv3-cgzip-lv2-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-cgzip-lv2-timeseries.csv
output_path: 2016042501/01-etcdv3-cgzip-lv2-aggregated.csv
- data_path_list:
- 2016042501/01-etcdv3-gzip-1-monitor.csv
- 2016042501/01-etcdv3-gzip-2-monitor.csv
- 2016042501/01-etcdv3-gzip-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-gzip-timeseries.csv
output_path: 2016042501/01-etcdv3-gzip-aggregated.csv
- data_path_list:
- 2016042501/01-etcdv3-snappy-1-monitor.csv
- 2016042501/01-etcdv3-snappy-2-monitor.csv
- 2016042501/01-etcdv3-snappy-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-snappy-timeseries.csv
output_path: 2016042501/01-etcdv3-snappy-aggregated.csv
- data_path_list:
- 2016042501/01-etcdv3-snappy-cpp-1-monitor.csv
- 2016042501/01-etcdv3-snappy-cpp-2-monitor.csv
- 2016042501/01-etcdv3-snappy-cpp-3-monitor.csv
data_benchmark_path: 2016042501/01-etcdv3-snappy-cpp-timeseries.csv
output_path: 2016042501/01-etcdv3-snappy-cpp-aggregated.csv
step2:
- data_list:
- path: 2016042501/01-etcdv3-nocompression-aggregated.csv
name: etcd_v3_nocompression
- path: 2016042501/01-etcdv3-cgzip-aggregated.csv
name: etcd_v3_cgzip
- path: 2016042501/01-etcdv3-cgzip-lv2-aggregated.csv
name: etcd_v3_cgzip_lv2
- path: 2016042501/01-etcdv3-gzip-aggregated.csv
name: etcd_v3_gzip
- path: 2016042501/01-etcdv3-snappy-aggregated.csv
name: etcd_v3_snappy
- path: 2016042501/01-etcdv3-snappy-cpp-aggregated.csv
name: etcd_v3_snappy_cpp
output_path: 2016042501/01-all-aggregated.csv
step3:
- data_path: 2016042501/01-all-aggregated.csv
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3_nocompression
legend: etcd v3 (no compression)
- column: avg_latency_ms_etcd_v3_cgzip
legend: etcd v3 (cgzip)
- column: avg_latency_ms_etcd_v3_cgzip_lv2
legend: etcd v3 (cgzip Level 2)
- column: avg_latency_ms_etcd_v3_gzip
legend: etcd v3 (gzip)
- column: avg_latency_ms_etcd_v3_snappy
legend: etcd v3 (snappy)
- column: avg_latency_ms_etcd_v3_snappy_cpp
legend: etcd v3 (snappy C++)
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- 2016042501/01-avg-latency-ms.svg
- 2016042501/01-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3_nocompression
legend: etcd v3 (no compression)
- column: throughput_etcd_v3_cgzip
legend: etcd v3 (cgzip)
- column: throughput_etcd_v3_cgzip_lv2
legend: etcd v3 (cgzip Level 2)
- column: throughput_etcd_v3_gzip
legend: etcd v3 (gzip)
- column: throughput_etcd_v3_snappy
legend: etcd v3 (snappy)
- column: throughput_etcd_v3_snappy_cpp
legend: etcd v3 (snappy C++)
x_axis: Second
y_axis: Throughput
output_path_list:
- 2016042501/01-throughput.svg
- 2016042501/01-throughput.png
- lines:
- column: avg_cpu_etcd_v3_nocompression
legend: etcd v3 (no compression)
- column: avg_cpu_etcd_v3_cgzip
legend: etcd v3 (cgzip)
- column: avg_cpu_etcd_v3_cgzip_lv2
legend: etcd v3 (cgzip Level 2)
- column: avg_cpu_etcd_v3_gzip
legend: etcd v3 (gzip)
- column: avg_cpu_etcd_v3_snappy
legend: etcd v3 (snappy)
- column: avg_cpu_etcd_v3_snappy_cpp
legend: etcd v3 (snappy C++)
x_axis: Second
y_axis: CPU
output_path_list:
- 2016042501/01-avg-cpu.svg
- 2016042501/01-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3_nocompression
legend: etcd v3 (no compression)
- column: avg_memory_mb_etcd_v3_cgzip
legend: etcd v3 (cgzip)
- column: avg_memory_mb_etcd_v3_cgzip_lv2
legend: etcd v3 (cgzip Level 2)
- column: avg_memory_mb_etcd_v3_gzip
legend: etcd v3 (gzip)
- column: avg_memory_mb_etcd_v3_snappy
legend: etcd v3 (snappy)
- column: avg_memory_mb_etcd_v3_snappy_cpp
legend: etcd v3 (snappy C++)
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- 2016042501/01-avg-memory.svg
- 2016042501/01-avg-memory.png
step4:
preface: |
- Google Cloud Compute Engine
- 8 vCPUs + 16GB Memory + 50GB SSD
- 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
- Ubuntu 15.10
- Go master branch on 2016-04-25
- etcd v3 (compress branch)
results:
- images:
- image_title: 2016042501/01-avg-latency-ms
image_path: https://storage.googleapis.com/dbtester-results/2016042501/01-avg-latency-ms.svg
image_type: remote
- image_title: 2016042501/01-throughput
image_path: https://storage.googleapis.com/dbtester-results/2016042501/01-throughput.svg
image_type: remote
- image_title: 2016042501/01-avg-cpu
image_path: https://storage.googleapis.com/dbtester-results/2016042501/01-avg-cpu.svg
image_type: remote
- image_title: 2016042501/01-avg-memory
image_path: https://storage.googleapis.com/dbtester-results/2016042501/01-avg-memory.svg
image_type: remote
output_path: 2016042501/README.md


@@ -0,0 +1,42 @@
database: etcdv3
test_name: 01-etcdv3-cgzip-lv2
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: dbtester-results
google_cloud_storage_sub_directory: 2016042501
peer_ips:
- 10.240.0.17
- 10.240.0.18
- 10.240.0.19
agent_port: 3500
database_port: 2379
# cgzip level-2 compression
etcd_compression: cgzip-lv2
# start database by sending RPC calls to agents
step1:
skip: false
zookeeper_max_client_connections: 5000
zookeeper_snap_count: 100000
# start benchmark
step2:
skip: false
bench_type: write
local_read: true
result_path: timeseries.csv
connections: 100
clients: 1000
key_size: 8
value_size: 256
total_requests: 2000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false
result_path: result.log


@@ -0,0 +1,42 @@
database: etcdv3
test_name: 01-etcdv3-cgzip
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: dbtester-results
google_cloud_storage_sub_directory: 2016042501
peer_ips:
- 10.240.0.17
- 10.240.0.18
- 10.240.0.19
agent_port: 3500
database_port: 2379
# cgzip compression
etcd_compression: cgzip
# start database by sending RPC calls to agents
step1:
skip: false
zookeeper_max_client_connections: 5000
zookeeper_snap_count: 100000
# start benchmark
step2:
skip: false
bench_type: write
local_read: true
result_path: timeseries.csv
connections: 100
clients: 1000
key_size: 8
value_size: 256
total_requests: 2000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false
result_path: result.log


@@ -1,19 +1,22 @@
database: etcdv2
test_name: 01-etcdv2
database: etcdv3
test_name: 01-etcdv3-nocompression
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: dbtester-results
google_cloud_storage_sub_directory: 2016041802
google_cloud_storage_sub_directory: 2016042501
peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14
- 10.240.0.17
- 10.240.0.18
- 10.240.0.19
agent_port: 3500
database_port: 2379
# no compression
etcd_compression: ""
# start database by sending RPC calls to agents
step1:
skip: false
@@ -26,15 +29,14 @@ step2:
bench_type: write
local_read: true
result_path: timeseries.csv
connections: 1000
connections: 100
clients: 1000
key_size: 8
value_size: 256
total_requests: 1000000
total_requests: 2000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false
result_path: result.log


@@ -0,0 +1,42 @@
database: etcdv3
test_name: 01-etcdv3-snappy-cpp
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: dbtester-results
google_cloud_storage_sub_directory: 2016042501
peer_ips:
- 10.240.0.17
- 10.240.0.18
- 10.240.0.19
agent_port: 3500
database_port: 2379
# snappy (C++ implementation) compression
etcd_compression: snappy-cpp
# start database by sending RPC calls to agents
step1:
skip: false
zookeeper_max_client_connections: 5000
zookeeper_snap_count: 100000
# start benchmark
step2:
skip: false
bench_type: write
local_read: true
result_path: timeseries.csv
connections: 100
clients: 1000
key_size: 8
value_size: 256
total_requests: 2000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false
result_path: result.log


@@ -0,0 +1,42 @@
database: etcdv3
test_name: 01-etcdv3-snappy
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: dbtester-results
google_cloud_storage_sub_directory: 2016042501
peer_ips:
- 10.240.0.17
- 10.240.0.18
- 10.240.0.19
agent_port: 3500
database_port: 2379
# snappy compression
etcd_compression: snappy
# start database by sending RPC calls to agents
step1:
skip: false
zookeeper_max_client_connections: 5000
zookeeper_snap_count: 100000
# start benchmark
step2:
skip: false
bench_type: write
local_read: true
result_path: timeseries.csv
connections: 100
clients: 1000
key_size: 8
value_size: 256
total_requests: 2000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false
result_path: result.log


@@ -37,6 +37,9 @@ type Config struct {
AgentEndpoints []string
DatabaseEndpoints []string
// cgzip, cgzip-lv2, gzip, snappy, snappy-cpp
EtcdCompression string `yaml:"etcd_compression"`
Step1 struct {
Skip bool `yaml:"skip"`


@@ -39,6 +39,9 @@ func TestReadConfig(t *testing.T) {
if c.DatabasePort != 2379 {
t.Fatalf("unexpected %d", c.DatabasePort)
}
if c.EtcdCompression != "snappy" {
t.Fatalf("unexpected %q", c.EtcdCompression)
}
if c.GoogleCloudProjectName != "etcd-development" {
t.Fatalf("unexpected %s", c.GoogleCloudProjectName)
}


@@ -145,6 +145,7 @@ func step1(cfg Config) error {
req.ZookeeperMaxClientCnxns = cfg.Step1.ZookeeperMaxClientCnxns
req.ZookeeperSnapCount = cfg.Step1.ZookeeperSnapCount
req.EtcdCompression = cfg.EtcdCompression
donec, errc := make(chan struct{}), make(chan error)
for i := range cfg.PeerIPs {
@@ -206,7 +207,6 @@ var (
func step2(cfg Config) error {
switch cfg.Step2.BenchType {
case "write":
results = make(chan result)
requests := make(chan request, cfg.Step2.Clients)
bar = pb.New(cfg.Step2.TotalRequests)
@@ -227,7 +227,7 @@
}
case "etcdv3":
etcdClients = mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections)
etcdClients = mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections, cfg.EtcdCompression)
for i := range etcdClients {
wg.Add(1)
go doPutEtcdv3(context.Background(), etcdClients[i], requests)
@@ -353,7 +353,7 @@
log.Printf("PUT '%s' to etcd", key)
var err error
for i := 0; i < 5; i++ {
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, 1, 1)
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, 1, 1, cfg.EtcdCompression)
_, err = clients[0].Do(context.Background(), clientv3.OpPut(key, value))
if err != nil {
continue
@@ -417,7 +417,7 @@
}
case "etcdv3":
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections)
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections, cfg.EtcdCompression)
for i := range clients {
wg.Add(1)
go doRangeEtcdv3(clients[i].KV, requests)


@@ -14,6 +14,8 @@ peer_ips:
agent_port: 3500
database_port: 2379
etcd_compression: snappy
# start database by sending RPC calls to agents
step1:
skip: false


@@ -34,6 +34,7 @@ import (
clientv2 "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/compress"
consulapi "github.com/hashicorp/consul/api"
"github.com/samuel/go-zookeeper/zk"
)
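
The compress package is vendored from the etcd compress branch and its source is not part of this diff; presumably ParseType maps the configured string to a typed constant, roughly as in this assumed sketch (the Type constant names here are hypothetical):

	// Assumed sketch of github.com/coreos/etcd/pkg/compress, not the real source.
	type Type int

	const (
		TypeNone Type = iota // empty or unknown string: no compression
		TypeCGzip
		TypeCGzipLv2
		TypeGzip
		TypeSnappy
		TypeSnappyCpp
	)

	func ParseType(s string) Type {
		switch s {
		case "cgzip":
			return TypeCGzip
		case "cgzip-lv2":
			return TypeCGzipLv2
		case "gzip":
			return TypeGzip
		case "snappy":
			return TypeSnappy
		case "snappy-cpp":
			return TypeSnappyCpp
		}
		return TypeNone
	}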
@@ -71,10 +72,13 @@
dialTotal int
)
func mustCreateConnEtcdv3(endpoints []string) *clientv3.Client {
func mustCreateConnEtcdv3(endpoints []string, compressType compress.Type) *clientv3.Client {
endpoint := endpoints[dialTotal%len(endpoints)]
dialTotal++
cfg := clientv3.Config{Endpoints: []string{endpoint}}
cfg := clientv3.Config{
Endpoints: []string{endpoint},
CompressType: compressType,
}
client, err := clientv3.New(cfg)
if err != nil {
fmt.Fprintf(os.Stderr, "dial error: %v\n", err)
@@ -83,10 +87,10 @@ func mustCreateConnEtcdv3(endpoints []string) *clientv3.Client {
return client
}
func mustCreateClientsEtcdv3(endpoints []string, totalClients, totalConns int) []*clientv3.Client {
func mustCreateClientsEtcdv3(endpoints []string, totalClients, totalConns int, compressionTypeTxt string) []*clientv3.Client {
conns := make([]*clientv3.Client, totalConns)
for i := range conns {
conns[i] = mustCreateConnEtcdv3(endpoints)
conns[i] = mustCreateConnEtcdv3(endpoints, compress.ParseType(compressionTypeTxt))
}
clients := make([]*clientv3.Client, totalClients)

2
vendor/github.com/cockroachdb/c-snappy/.gitignore generated vendored Normal file

@@ -0,0 +1,2 @@
*.test
testdata

27
vendor/github.com/cockroachdb/c-snappy/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

6
vendor/github.com/cockroachdb/c-snappy/Makefile generated vendored Normal file

@@ -0,0 +1,6 @@
include internal/Makefile.am
sources:
@for source_file in $(libsnappy_la_SOURCES); do \
echo internal/$$source_file; \
done

22
vendor/github.com/cockroachdb/c-snappy/README.md generated vendored Normal file

@@ -0,0 +1,22 @@
# Snappy
This is a fork of the snappy-go library from code.google.com. It has been
changed to use the C++ snappy library for encoding and decoding.
This package is also a go-gettable version of the C++ snappy library for use in
Go code that needs to link against the C++ snappy library but wants to
integrate with `go get` and `go build`. The snappy source is currently pinned
to the 1.1.1 release.
To use in your project you need to import the package and set appropriate cgo
flag directives:
```
import _ "github.com/cockroachdb/c-snappy"
// #cgo CXXFLAGS: -std=c++11
// #cgo CPPFLAGS: -I <relative-path>/c-snappy/internal
// #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup
// #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all
import "C"
```

7
vendor/github.com/cockroachdb/c-snappy/cgo_flags.go generated vendored Normal file

@@ -0,0 +1,7 @@
// Package snappy uses the cgo compilation facilities to build the
// Snappy C++ library.
package snappy
// #cgo CXXFLAGS: -std=c++11
// #cgo CPPFLAGS: -DHAVE_CONFIG_H -Iinternal
import "C"

8
vendor/github.com/cockroachdb/c-snappy/circle.yml generated vendored Normal file

@@ -0,0 +1,8 @@
dependencies:
pre:
- ./install-clang-3.6.sh
machine:
environment:
CC: /usr/lib/llvm-3.6/bin/clang
CXX: /usr/lib/llvm-3.6/bin/clang++

227
vendor/github.com/cockroachdb/c-snappy/decode.go generated vendored Normal file

@@ -0,0 +1,227 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
"unsafe"
)
// #include <snappy-c.h>
import "C"
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
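// For example, a block that decodes to 300 bytes begins with the uvarint
// bytes 0xAC 0x02.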
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n == 0 {
return 0, 0, ErrCorrupt
}
if uint64(int(v)) != v {
return 0, 0, errors.New("snappy: decoded block is too large")
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, _, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
if dLen == 0 {
return dst[:0], nil
}
tLen := C.size_t(dLen)
status := C.snappy_uncompress((*C.char)(unsafe.Pointer(&src[0])), C.size_t(len(src)),
(*C.char)(unsafe.Pointer(&dst[0])), &tLen)
if status != C.SNAPPY_OK {
return nil, ErrCorrupt
}
return dst[:tLen], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
return 0, r.err
}
}
}
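
A short usage sketch for the framing-format Reader above, paired with the Writer defined in encode.go below (assumes bytes, fmt, io/ioutil, and log are imported alongside this package as snappy):

	var buf bytes.Buffer
	w := snappy.NewWriter(&buf)
	if _, err := w.Write([]byte("framed payload")); err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out) // prints: framed payload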

146
vendor/github.com/cockroachdb/c-snappy/encode.go generated vendored Normal file

@@ -0,0 +1,146 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"io"
"unsafe"
)
// #include <snappy-c.h>
import "C"
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
var srcPtr unsafe.Pointer
if len(src) != 0 {
srcPtr = unsafe.Pointer(&src[0])
}
dLen := C.size_t(len(dst))
status := C.snappy_compress((*C.char)(srcPtr), C.size_t(len(src)),
(*C.char)(unsafe.Pointer(&dst[0])), &dLen)
if status != C.SNAPPY_OK {
return nil, ErrCorrupt
}
return dst[:dLen], nil
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
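// For example, a 1 MiB input (srcLen = 1,048,576) is bounded by
// 32 + 1,048,576 + 1,048,576/6 = 1,223,370 bytes.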
func MaxEncodedLen(srcLen int) int {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
buf [checksumSize + chunkHeaderSize]byte
wroteHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody, err := Encode(w.enc, uncompressed)
if err != nil {
w.err = err
return n, err
}
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err = w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err = w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}
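
Block-level usage mirrors the snappy-go API this package was forked from; a minimal round trip through the Encode and Decode functions above (building it requires cgo and the bundled C++ snappy sources):

	package main

	import (
		"bytes"
		"fmt"

		snappy "github.com/cockroachdb/c-snappy"
	)

	func main() {
		src := bytes.Repeat([]byte("hello "), 64)
		enc, err := snappy.Encode(nil, src) // nil dst: Encode allocates
		if err != nil {
			panic(err)
		}
		dec, err := snappy.Decode(nil, enc)
		if err != nil {
			panic(err)
		}
		// src and dec match; enc is much shorter for repetitive input.
		fmt.Println(len(src), len(enc), bytes.Equal(src, dec))
	}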

16
vendor/github.com/cockroachdb/c-snappy/import.sh generated vendored Normal file

@@ -0,0 +1,16 @@
#!/usr/bin/env sh
set -eu
rm -rf *.cc internal/*
curl -sL https://github.com/google/snappy/archive/1.1.3.tar.gz | tar zxf - -C internal --strip-components=1
(cd internal && ./autogen.sh && ./configure)
# symlink so cgo compiles them
for source_file in $(make sources); do
ln -sf $source_file .
done
# restore the repo to what it would look like when first cloned.
# comment this line out while updating upstream.
git clean -dxf


@@ -0,0 +1,9 @@
#!/usr/bin/env sh
set -eux
yes | sudo add-apt-repository ppa:ubuntu-toolchain-r/test
yes | sudo add-apt-repository 'deb http://llvm.org/apt/precise/ llvm-toolchain-precise-3.6 main'
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update -qq
sudo apt-get install -qq clang-3.6


@@ -0,0 +1 @@
opensource@google.com


@@ -0,0 +1,54 @@
Copyright 2011, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===
Some of the benchmark data in testdata/ is licensed differently:
- fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and
is licensed under the Creative Commons Attribution 3.0 license
(CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/
for more information.
- kppkn.gtb is taken from the Gaviota chess tablebase set, and
is licensed under the MIT License. See
https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1
for more information.
- paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper
“Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA
Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro,
which is licensed under the CC-BY license. See
http://www.ploscompbiol.org/static/license for more information.
- alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project
Gutenberg. The first three have expired copyrights and are in the public
domain; the latter does not have expired copyright, but is still in the
public domain according to the license information
(http://www.gutenberg.org/ebooks/53).

2468
vendor/github.com/cockroachdb/c-snappy/internal/ChangeLog generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,23 @@
ACLOCAL_AMFLAGS = -I m4
# Library.
lib_LTLIBRARIES = libsnappy.la
libsnappy_la_SOURCES = snappy.cc snappy-sinksource.cc snappy-stubs-internal.cc snappy-c.cc
libsnappy_la_LDFLAGS = -version-info $(SNAPPY_LTVERSION)
include_HEADERS = snappy.h snappy-sinksource.h snappy-stubs-public.h snappy-c.h
noinst_HEADERS = snappy-internal.h snappy-stubs-internal.h snappy-test.h
# Unit tests and benchmarks.
snappy_unittest_CPPFLAGS = $(gflags_CFLAGS) $(GTEST_CPPFLAGS)
snappy_unittest_SOURCES = snappy_unittest.cc snappy-test.cc
snappy_unittest_LDFLAGS = $(GTEST_LDFLAGS)
snappy_unittest_LDADD = libsnappy.la $(UNITTEST_LIBS) $(gflags_LIBS) $(GTEST_LIBS)
TESTS = snappy_unittest
noinst_PROGRAMS = $(TESTS)
EXTRA_DIST = autogen.sh testdata/alice29.txt testdata/asyoulik.txt testdata/baddata1.snappy testdata/baddata2.snappy testdata/baddata3.snappy testdata/geo.protodata testdata/fireworks.jpeg testdata/html testdata/html_x_4 testdata/kppkn.gtb testdata/lcet10.txt testdata/paper-100k.pdf testdata/plrabn12.txt testdata/urls.10K
dist_doc_DATA = ChangeLog COPYING INSTALL NEWS README format_description.txt framing_format.txt
libtool: $(LIBTOOL_DEPS)
$(SHELL) ./config.status --recheck

140
vendor/github.com/cockroachdb/c-snappy/internal/NEWS generated vendored Normal file

@@ -0,0 +1,140 @@
Snappy v1.1.3, July 6th 2015:
This is the first release to be done from GitHub, which means that
some minor things like the ChangeLog format have changed (git log
format instead of svn log).
* Add support for Uncompress() from a Source to a Sink.
* Various minor changes to improve MSVC support; in particular,
the unit tests now compile and run under MSVC.
Snappy v1.1.2, February 28th 2014:
This is a maintenance release with no changes to the actual library
source code.
* Stop distributing benchmark data files that have unclear
or unsuitable licensing.
* Add support for padding chunks in the framing format.
Snappy v1.1.1, October 15th 2013:
* Add support for uncompressing to iovecs (scatter I/O).
The bulk of this patch was contributed by Mohit Aron.
* Speed up decompression by ~2%; much more so (~13-20%) on
a few benchmarks on given compilers and CPUs.
* Fix a few issues with MSVC compilation.
* Support truncated test data in the benchmark.
Snappy v1.1.0, January 18th 2013:
* Snappy now uses 64 kB block size instead of 32 kB. On average,
this means it compresses about 3% denser (more so for some
inputs), at the same or better speeds.
* libsnappy no longer depends on iostream.
* Some small performance improvements in compression on x86
(0.5-1%).
* Various portability fixes for ARM-based platforms, for MSVC,
and for GNU/Hurd.
Snappy v1.0.5, February 24th 2012:
* More speed improvements. Exactly how big will depend on
the architecture:
- 3-10% faster decompression for the base case (x86-64).
- ARMv7 and higher can now use unaligned accesses,
and will see about 30% faster decompression and
20-40% faster compression.
- 32-bit platforms (ARM and 32-bit x86) will see 2-5%
faster compression.
These are all cumulative (e.g., ARM gets all three speedups).
* Fixed an issue where the unit test would crash on system
with less than 256 MB address space available,
e.g. some embedded platforms.
* Added a framing format description, for use over e.g. HTTP,
or for a command-line compressor. We do not have any
implementations of this at the current point, but there seems
to be enough of a general interest in the topic.
Also make the format description slightly clearer.
* Remove some compile-time warnings in -Wall
(mostly signed/unsigned comparisons), for easier embedding
into projects that use -Wall -Werror.
Snappy v1.0.4, September 15th 2011:
* Speeded up the decompressor somewhat; typically about 2-8%
for Core i7, in 64-bit mode (comparable for Opteron).
Somewhat more for some tests, almost no gain for others.
* Make Snappy compile on certain platforms it didn't before
(Solaris with SunPro C++, HP-UX, AIX).
* Correct some minor errors in the format description.
Snappy v1.0.3, June 2nd 2011:
* Speeded up the decompressor somewhat; about 3-6% for Core 2,
6-13% for Core i7, and 5-12% for Opteron (all in 64-bit mode).
* Added compressed format documentation. This text is new,
but an earlier version from Zeev Tarantov was used as reference.
* Only link snappy_unittest against -lz and other autodetected
libraries, not libsnappy.so (which doesn't need any such dependency).
* Fixed some display issues in the microbenchmarks, one of which would
frequently make the test crash on GNU/Hurd.
Snappy v1.0.2, April 29th 2011:
* Relicense to a BSD-type license.
* Added C bindings, contributed by Martin Gieseking.
* More Win32 fixes, in particular for MSVC.
* Replace geo.protodata with a newer version.
* Fix timing inaccuracies in the unit test when comparing Snappy
to other algorithms.
Snappy v1.0.1, March 25th 2011:
This is a maintenance release, mostly containing minor fixes.
There is no new functionality. The most important fixes include:
* The COPYING file and all licensing headers now correctly state that
Snappy is licensed under the Apache 2.0 license.
* snappy_unittest should now compile natively under Windows,
as well as on embedded systems with no mmap().
* Various autotools nits have been fixed.
Snappy v1.0, March 17th 2011:
* Initial version.

135
vendor/github.com/cockroachdb/c-snappy/internal/README generated vendored Normal file

@@ -0,0 +1,135 @@
Snappy, a fast compressor/decompressor.
Introduction
============
Snappy is a compression/decompression library. It does not aim for maximum
compression, or compatibility with any other compression library; instead,
it aims for very high speeds and reasonable compression. For instance,
compared to the fastest mode of zlib, Snappy is an order of magnitude faster
for most inputs, but the resulting compressed files are anywhere from 20% to
100% bigger. (For more information, see "Performance", below.)
Snappy has the following properties:
* Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
See "Performance" below.
* Stable: Over the last few years, Snappy has compressed and decompressed
petabytes of data in Google's production environment. The Snappy bitstream
format is stable and will not change between versions.
* Robust: The Snappy decompressor is designed not to crash in the face of
corrupted or malicious input.
* Free and open source software: Snappy is licensed under a BSD-type license.
For more information, see the included COPYING file.
Snappy has previously been called "Zippy" in some Google presentations
and the like.
Performance
===========
Snappy is intended to be fast. On a single core of a Core i7 processor
in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
about 500 MB/sec or more. (These numbers are for the slowest inputs in our
benchmark suite; others are much faster.) In our tests, Snappy usually
is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
etc.) while achieving comparable compression ratios.
Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
other already-compressed data. Similar numbers for zlib in its fastest mode
are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
capable of achieving yet higher compression rates, although usually at the
expense of speed. Of course, compression ratio will vary significantly with
the input.
Although Snappy should be fairly portable, it is primarily optimized
for 64-bit x86-compatible processors, and may run slower in other environments.
In particular:
- Snappy uses 64-bit operations in several places to process more data at
once than would otherwise be possible.
- Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
On some platforms, these must be emulated with single-byte loads
and stores, which is much slower.
- Snappy assumes little-endian throughout, and needs to byte-swap data in
several places if running on a big-endian platform.
Experience has shown that even heavily tuned code can be improved.
Performance optimizations, whether for 64-bit x86 or other platforms,
are of course most welcome; see "Contact", below.
Usage
=====
Note that Snappy, both the implementation and the main interface,
is written in C++. However, several third-party bindings to other languages
are available; see the Google Code page at http://code.google.com/p/snappy/
for more information. Also, if you want to use Snappy from C code, you can
use the included C bindings in snappy-c.h.
To use Snappy from your own C++ program, include the file "snappy.h" from
your calling file, and link against the compiled library.
There are many ways to call Snappy, but the simplest possible is
snappy::Compress(input.data(), input.size(), &output);
and similarly
snappy::Uncompress(input.data(), input.size(), &output);
where "input" and "output" are both instances of std::string.
There are other interfaces that are more flexible in various ways, including
support for custom (non-array) input sources. See the header file for more
information.
Tests and benchmarks
====================
When you compile Snappy, snappy_unittest is compiled in addition to the
library itself. You do not need it to use the compressor from your own library,
but it contains several useful components for Snappy development.
First of all, it contains unit tests, verifying correctness on your machine in
various scenarios. If you want to change or optimize Snappy, please run the
tests to verify you have not broken anything. Note that if you have the
Google Test library installed, unit test behavior (especially failures) will be
significantly more user-friendly. You can find Google Test at
http://code.google.com/p/googletest/
You probably also want the gflags library for handling of command-line flags;
you can find it at
http://code.google.com/p/google-gflags/
In addition to the unit tests, snappy contains microbenchmarks used to
tune compression and decompression performance. These are automatically run
before the unit tests, but you can disable them using the flag
--run_microbenchmarks=false if you have gflags installed (otherwise you will
need to edit the source).
Finally, snappy can benchmark Snappy against a few other compression libraries
(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
To benchmark using a given file, give the compression algorithm you want to test
Snappy against (e.g. --zlib) and then a list of one or more file names on the
command line. The testdata/ directory contains the files used by the
microbenchmark, which should provide a reasonably balanced starting point for
benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
are used to verify correctness in the presence of corrupted data in the unit
test.)
Contact
=======
Snappy is distributed through Google Code. For the latest version, a bug tracker,
and other information, see
http://code.google.com/p/snappy/

7
vendor/github.com/cockroachdb/c-snappy/internal/autogen.sh generated vendored Executable file

@@ -0,0 +1,7 @@
#! /bin/sh -e
rm -rf autom4te.cache
aclocal -I m4
autoheader
libtoolize --copy
automake --add-missing --copy
autoconf


@@ -0,0 +1,134 @@
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* Define to 1 if the compiler supports __builtin_ctz and friends. */
#define HAVE_BUILTIN_CTZ 1
/* Define to 1 if the compiler supports __builtin_expect. */
#define HAVE_BUILTIN_EXPECT 1
/* Define to 1 if you have the <byteswap.h> header file. */
/* #undef HAVE_BYTESWAP_H */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Use the gflags package for command-line parsing. */
/* #undef HAVE_GFLAGS */
/* Defined when Google Test is available. */
/* #undef HAVE_GTEST */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the `fastlz' library (-lfastlz). */
/* #undef HAVE_LIBFASTLZ */
/* Define to 1 if you have the `lzf' library (-llzf). */
/* #undef HAVE_LIBLZF */
/* Define to 1 if you have the `lzo2' library (-llzo2). */
/* #undef HAVE_LIBLZO2 */
/* Define to 1 if you have the `quicklz' library (-lquicklz). */
/* #undef HAVE_LIBQUICKLZ */
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the <stddef.h> header file. */
#define HAVE_STDDEF_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the <sys/byteswap.h> header file. */
/* #undef HAVE_SYS_BYTESWAP_H */
/* Define to 1 if you have the <sys/endian.h> header file. */
/* #undef HAVE_SYS_ENDIAN_H */
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the <windows.h> header file. */
/* #undef HAVE_WINDOWS_H */
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "snappy"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "snappy"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "snappy 1.1.3"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "snappy"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "1.1.3"
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Version number of package */
#define VERSION "1.1.3"
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to `int' if <sys/types.h> does not define. */
/* #undef ssize_t */

View File

@ -0,0 +1,133 @@
m4_define([snappy_major], [1])
m4_define([snappy_minor], [1])
m4_define([snappy_patchlevel], [3])
# Libtool shared library interface versions (current:revision:age)
# Update this value for every release! (A:B:C will map to foo.so.(A-C).C.B)
# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
m4_define([snappy_ltversion], [4:0:3])
AC_INIT([snappy], [snappy_major.snappy_minor.snappy_patchlevel])
AC_CONFIG_MACRO_DIR([m4])
# These are flags passed to automake (though they look like gcc flags!)
AM_INIT_AUTOMAKE([-Wall])
LT_INIT
AC_SUBST([LIBTOOL_DEPS])
AC_PROG_CXX
AC_LANG([C++])
AC_C_BIGENDIAN
AC_TYPE_SIZE_T
AC_TYPE_SSIZE_T
AC_CHECK_HEADERS([stdint.h stddef.h sys/mman.h sys/resource.h windows.h byteswap.h sys/byteswap.h sys/endian.h sys/time.h])
# Don't use AC_FUNC_MMAP, as it checks for mappings of already-mapped memory,
# which we don't need (and does not exist on Windows).
AC_CHECK_FUNC([mmap])
GTEST_LIB_CHECK([], [true], [true # Ignore; we can live without it.])
AC_ARG_WITH([gflags],
[AS_HELP_STRING(
[--with-gflags],
[use Google Flags package to enhance the unit test @<:@default=check@:>@])],
[],
[with_gflags=check])
if test "x$with_gflags" != "xno"; then
PKG_CHECK_MODULES(
[gflags],
[libgflags],
[AC_DEFINE([HAVE_GFLAGS], [1], [Use the gflags package for command-line parsing.])],
[if test "x$with_gflags" != "xcheck"; then
AC_MSG_FAILURE([--with-gflags was given, but test for gflags failed])
fi])
fi
# See if we have __builtin_expect.
# TODO: Use AC_CACHE.
AC_MSG_CHECKING([if the compiler supports __builtin_expect])
AC_TRY_COMPILE(, [
return __builtin_expect(1, 1) ? 1 : 0
], [
snappy_have_builtin_expect=yes
AC_MSG_RESULT([yes])
], [
snappy_have_builtin_expect=no
AC_MSG_RESULT([no])
])
if test x$snappy_have_builtin_expect = xyes ; then
AC_DEFINE([HAVE_BUILTIN_EXPECT], [1], [Define to 1 if the compiler supports __builtin_expect.])
fi
# See if we have working count-trailing-zeros intrinsics.
# TODO: Use AC_CACHE.
AC_MSG_CHECKING([if the compiler supports __builtin_ctzll])
AC_TRY_COMPILE(, [
return (__builtin_ctzll(0x100000000LL) == 32) ? 1 : 0
], [
snappy_have_builtin_ctz=yes
AC_MSG_RESULT([yes])
], [
snappy_have_builtin_ctz=no
AC_MSG_RESULT([no])
])
if test x$snappy_have_builtin_ctz = xyes ; then
AC_DEFINE([HAVE_BUILTIN_CTZ], [1], [Define to 1 if the compiler supports __builtin_ctz and friends.])
fi
# Other compression libraries; the unit test can use these for comparison
# if they are available. If they are not found, just ignore.
UNITTEST_LIBS=""
AC_DEFUN([CHECK_EXT_COMPRESSION_LIB], [
AH_CHECK_LIB([$1])
AC_CHECK_LIB(
[$1],
[$2],
[
AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIB$1))
UNITTEST_LIBS="-l$1 $UNITTEST_LIBS"
],
[true]
)
])
CHECK_EXT_COMPRESSION_LIB([z], [zlibVersion])
CHECK_EXT_COMPRESSION_LIB([lzo2], [lzo1x_1_15_compress])
CHECK_EXT_COMPRESSION_LIB([lzf], [lzf_compress])
CHECK_EXT_COMPRESSION_LIB([fastlz], [fastlz_compress])
CHECK_EXT_COMPRESSION_LIB([quicklz], [qlz_compress])
AC_SUBST([UNITTEST_LIBS])
# These are used by snappy-stubs-public.h.in.
if test "$ac_cv_header_stdint_h" = "yes"; then
AC_SUBST([ac_cv_have_stdint_h], [1])
else
AC_SUBST([ac_cv_have_stdint_h], [0])
fi
if test "$ac_cv_header_stddef_h" = "yes"; then
AC_SUBST([ac_cv_have_stddef_h], [1])
else
AC_SUBST([ac_cv_have_stddef_h], [0])
fi
if test "$ac_cv_header_sys_uio_h" = "yes"; then
AC_SUBST([ac_cv_have_sys_uio_h], [1])
else
AC_SUBST([ac_cv_have_sys_uio_h], [0])
fi
# Export the version to snappy-stubs-public.h.
SNAPPY_MAJOR="snappy_major"
SNAPPY_MINOR="snappy_minor"
SNAPPY_PATCHLEVEL="snappy_patchlevel"
AC_SUBST([SNAPPY_MAJOR])
AC_SUBST([SNAPPY_MINOR])
AC_SUBST([SNAPPY_PATCHLEVEL])
AC_SUBST([SNAPPY_LTVERSION], snappy_ltversion)
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_FILES([Makefile snappy-stubs-public.h])
AC_OUTPUT

View File

@ -0,0 +1,110 @@
Snappy compressed format description
Last revised: 2011-10-05
This is not a formal specification, but should suffice to explain most
relevant parts of how the Snappy format works. It is originally based on
text by Zeev Tarantov.
Snappy is an LZ77-type compressor with a fixed, byte-oriented encoding.
There is no entropy encoder backend nor framing layer -- the latter is
assumed to be handled by other parts of the system.
This document only describes the format, not how the Snappy compressor nor
decompressor actually works. The correctness of the decompressor should not
depend on implementation details of the compressor, and vice versa.
1. Preamble
The stream starts with the uncompressed length (up to a maximum of 2^32 - 1),
stored as a little-endian varint. Varints consist of a series of bytes,
where the lower 7 bits are data and the upper bit is set iff there are
more bytes to be read. In other words, an uncompressed length of 64 would
be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
would be stored as 0xFE 0xFF 0x7F.
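As a sketch (not the reference implementation; the real decoder enforces
stricter limits on the final byte), the preamble could be read like this:

  #include <stddef.h>
  #include <stdint.h>

  // Returns the number of preamble bytes consumed, or 0 on a truncated
  // or over-long varint.
  size_t ReadUncompressedLength(const uint8_t* p, const uint8_t* limit,
                                uint32_t* result) {
    uint32_t value = 0;
    for (size_t i = 0; i < 5 && p + i < limit; i++) {
      uint8_t b = p[i];
      value |= (uint32_t)(b & 127) << (7 * i);
      if (b < 128) {
        *result = value;
        return i + 1;
      }
    }
    return 0;
  }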
2. The compressed stream itself
There are two types of elements in a Snappy stream: Literals and
copies (backreferences). There is no restriction on the order of elements,
except that the stream naturally cannot start with a copy. (Having
two literals in a row is never optimal from a compression point of
view, but nevertheless fully permitted.) Each element starts with a tag byte,
and the lower two bits of this tag byte signal what type of element will
follow:
00: Literal
01: Copy with 1-byte offset
10: Copy with 2-byte offset
11: Copy with 4-byte offset
The interpretation of the upper six bits is element-dependent.
2.1. Literals (00)
Literals are uncompressed data stored directly in the byte stream.
The literal length is stored differently depending on the length
of the literal (a worked example follows this list):
- For literals up to and including 60 bytes in length, the upper
six bits of the tag byte contain (len-1). The literal follows
immediately thereafter in the bytestream.
- For longer literals, the (len-1) value is stored after the tag byte,
little-endian. The upper six bits of the tag byte describe how
many bytes are used for the length; 60, 61, 62 or 63 for
1-4 bytes, respectively. The literal itself follows after the
length.
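For instance, a five-byte literal is emitted as the single tag byte 0x10
(lower two bits 00 for "literal", upper six bits holding len-1 = 4),
followed immediately by the five literal bytes.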
2.2. Copies
Copies are references back into previous decompressed data, telling
the decompressor to reuse data it has previously decoded.
They encode two values: The _offset_, saying how many bytes back
from the current position to read, and the _length_, how many bytes
to copy. Offsets of zero can be encoded, but are not legal;
similarly, it is possible to encode backreferences that would
go past the end of the block (offset > current decompressed position),
which is also nonsensical and thus not allowed.
As in most LZ77-based compressors, the length can be larger than the offset,
yielding a form of run-length encoding (RLE). For instance,
"xababab" could be encoded as
<literal: "xab"> <copy: offset=2 length=4>
Note that since the current Snappy compressor works in 32 kB
blocks and does not do matching across blocks, it will never produce
a bitstream with offsets larger than about 32768. However, the
decompressor should not rely on this, as it may change in the future.
There are several different kinds of copy elements, depending on
the amount of bytes to be copied (length), and how far back the
data to be copied is (offset).
2.2.1. Copy with 1-byte offset (01)
These elements can encode lengths between [4..11] bytes and offsets
between [0..2047] bytes. (len-4) occupies three bits and is stored
in bits [2..4] of the tag byte. The offset occupies 11 bits, of which the
upper three are stored in the upper three bits ([5..7]) of the tag byte,
and the lower eight are stored in a byte following the tag byte.
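As a worked example, the <copy: offset=2 length=4> element from the
"xababab" example above fits this format: the tag type is 01, (len-4) = 0
occupies bits [2..4], and the upper three offset bits are zero in bits
[5..7], giving the tag byte 0x01, followed by the low offset byte 0x02;
two bytes in total.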
2.2.2. Copy with 2-byte offset (10)
These elements can encode lengths between [1..64] and offsets from
[0..65535]. (len-1) occupies six bits and is stored in the upper
six bits ([2..7]) of the tag byte. The offset is stored as a
little-endian 16-bit integer in the two bytes following the tag byte.
2.2.3. Copy with 4-byte offset (11)
These are like the copies with 2-byte offsets (see previous subsection),
except that the offset is stored as a 32-bit integer instead of a
16-bit integer (and thus will occupy four bytes).
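Putting the tag rules together, a decoder's element-header dispatch could
look like the following sketch (a hypothetical helper, not the reference
implementation):

  #include <stddef.h>
  #include <stdint.h>

  // Decodes one element header starting at p. Returns the number of header
  // bytes consumed (0 on truncation) and fills *len; *offset is set to 0
  // for literals and to the copy offset otherwise.
  size_t DecodeElementHeader(const uint8_t* p, const uint8_t* limit,
                             uint32_t* len, uint32_t* offset) {
    if (p >= limit) return 0;
    uint8_t tag = p[0];
    switch (tag & 3) {
      case 0: {  // literal
        uint32_t n = tag >> 2;
        if (n < 60) { *len = n + 1; *offset = 0; return 1; }
        size_t extra = n - 59;  // 1..4 little-endian length bytes follow
        if (p + 1 + extra > limit) return 0;
        uint32_t v = 0;
        for (size_t i = 0; i < extra; i++) v |= (uint32_t)p[1 + i] << (8 * i);
        *len = v + 1; *offset = 0; return 1 + extra;
      }
      case 1: {  // copy with 1-byte offset
        if (p + 2 > limit) return 0;
        *len = 4 + ((tag >> 2) & 7);
        *offset = ((uint32_t)(tag >> 5) << 8) | p[1];
        return 2;
      }
      case 2: {  // copy with 2-byte offset
        if (p + 3 > limit) return 0;
        *len = (tag >> 2) + 1;
        *offset = (uint32_t)p[1] | ((uint32_t)p[2] << 8);
        return 3;
      }
      default: {  // copy with 4-byte offset
        if (p + 5 > limit) return 0;
        *len = (tag >> 2) + 1;
        *offset = (uint32_t)p[1] | ((uint32_t)p[2] << 8) |
                  ((uint32_t)p[3] << 16) | ((uint32_t)p[4] << 24);
        return 5;
      }
    }
  }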

View File

@ -0,0 +1,135 @@
Snappy framing format description
Last revised: 2013-10-25
This document describes a framing format for Snappy, allowing compression to
files or streams that can then more easily be decompressed without having
to hold the entire stream in memory. It also provides data checksums to
help verify integrity. It does not provide metadata checksums, so it does
not protect against e.g. all forms of truncations.
Implementation of the framing format is optional for Snappy compressors and
decompressors; it is not part of the Snappy core specification.
1. General structure
The file consists solely of chunks, lying back-to-back with no padding
in between. Each chunk consists of a single byte of chunk identifier,
then a three-byte little-endian length of the chunk in bytes (from 0 to
16777215, inclusive), and then the data, if any. The four bytes of chunk
header are not counted in the data length.
The different chunk types are listed below. The first chunk must always
be the stream identifier chunk (see section 4.1, below). The stream
ends when the file ends -- there is no explicit end-of-file marker.
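A chunk header is thus always four bytes; a sketch of parsing one
(assuming at least four bytes are available at "p"):

  #include <stdint.h>

  struct ChunkHeader {
    uint8_t type;
    uint32_t length;  // length of the data that follows: 0..16777215
  };

  ChunkHeader ParseChunkHeader(const uint8_t* p) {
    ChunkHeader h;
    h.type = p[0];
    h.length = (uint32_t)p[1] | ((uint32_t)p[2] << 8) | ((uint32_t)p[3] << 16);
    return h;
  }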
2. File type identification
The following identifiers for this format are recommended where appropriate.
However, note that none have been registered officially, so this is only to
be taken as a guideline. We use "Snappy framed" to distinguish between this
format and raw Snappy data.
File extension: .sz
MIME type: application/x-snappy-framed
HTTP Content-Encoding: x-snappy-framed
3. Checksum format
Some chunks have data protected by a checksum (the ones that do will say so
explicitly). The checksums are always masked CRC-32Cs.
A description of CRC-32C can be found in RFC 3720, section 12.1, with
examples in section B.4.
Checksums are not stored directly, but masked, as computing a checksum
over data that itself contains checksums can be problematic. The masking
is the same as used in Apache Hadoop: rotate the checksum right by 15 bits,
then add the constant 0xa282ead8 (using wraparound as normal for unsigned
integers). This is equivalent to the following C code:
equivalent to the following C code:
uint32_t mask_checksum(uint32_t x) {
return ((x >> 15) | (x << 17)) + 0xa282ead8;
}
Note that the masking is reversible.
The checksum is always stored as a four-byte integer, in little-endian order.
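Since the masking is reversible, a decoder can recover the CRC-32C with
the inverse operation; a sketch mirroring the C code above:

  uint32_t unmask_checksum(uint32_t x) {
    x -= 0xa282ead8;               // undo the constant (wraparound as normal)
    return (x >> 17) | (x << 15);  // rotate left by 15 bits
  }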
4. Chunk types
The currently supported chunk types are described below. The list may
be extended in the future.
4.1. Stream identifier (chunk type 0xff)
The stream identifier is always the first element in the stream.
It is exactly six bytes long and contains "sNaPpY" in ASCII. This means that
a valid Snappy framed stream always starts with the bytes
0xff 0x06 0x00 0x00 0x73 0x4e 0x61 0x50 0x70 0x59
The stream identifier chunk can come multiple times in the stream besides
the first; if such a chunk shows up, it should simply be ignored, assuming
it has the right length and contents. This allows for easy concatenation of
compressed files without the need for re-framing.
4.2. Compressed data (chunk type 0x00)
Compressed data chunks contain a normal Snappy compressed bitstream;
see the compressed format specification. The compressed data is preceded by
the CRC-32C (see section 3) of the _uncompressed_ data.
Note that the data portion of the chunk, i.e., the compressed contents,
can be at most 16777211 bytes (2^24 - 1, minus the checksum).
However, we place an additional restriction that the uncompressed data
in a chunk must be no longer than 65536 bytes. This allows consumers to
easily use small fixed-size buffers.
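Verifying such a chunk therefore means reading the masked CRC,
Snappy-decompressing the remainder, and comparing checksums. A sketch,
where crc32c() is an assumed helper returning the unmasked CRC-32C of a
buffer (per RFC 3720) and mask_checksum() is the function from section 3:

  #include <stddef.h>
  #include <stdint.h>
  #include <string>
  #include "snappy.h"

  uint32_t crc32c(const char* data, size_t n);  // assumed helper, not shown
  uint32_t mask_checksum(uint32_t x);           // see section 3

  bool VerifyCompressedChunk(const uint8_t* data, uint32_t chunk_len,
                             std::string* out) {
    if (chunk_len < 4) return false;
    uint32_t masked = (uint32_t)data[0] | ((uint32_t)data[1] << 8) |
                      ((uint32_t)data[2] << 16) | ((uint32_t)data[3] << 24);
    if (!snappy::Uncompress((const char*)data + 4, chunk_len - 4, out))
      return false;
    return mask_checksum(crc32c(out->data(), out->size())) == masked;
  }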
4.3. Uncompressed data (chunk type 0x01)
Uncompressed data chunks allow a compressor to send uncompressed,
raw data; this is useful if, for instance, uncompressible or
near-incompressible data is detected, and faster decompression is desired.
As in the compressed chunks, the data is preceded by its own masked
CRC-32C (see section 3).
An uncompressed data chunk, like compressed data chunks, should contain
no more than 65536 data bytes, so the maximum legal chunk length with the
checksum is 65540.
4.4. Padding (chunk type 0xfe)
Padding chunks allow a compressor to increase the size of the data stream
so that it complies with external demands, e.g. that the total number of
bytes is a multiple of some value.
All bytes of the padding chunk, except the chunk type byte and the length,
should be zero, but decompressors must not try to interpret or verify the
padding data in any way.
4.5. Reserved unskippable chunks (chunk types 0x02-0x7f)
These are reserved for future expansion. A decoder that sees such a chunk
should immediately return an error, as it must assume it cannot decode the
stream correctly.
Future versions of this specification may define meanings for these chunks.
4.6. Reserved skippable chunks (chunk types 0x80-0xfd)
These are also reserved for future expansion, but unlike the chunks
described in 4.5, a decoder seeing these must skip them and continue
decoding.
Future versions of this specification may define meanings for these chunks.

View File

@ -0,0 +1,74 @@
dnl GTEST_LIB_CHECK([minimum version [,
dnl action if found [,action if not found]]])
dnl
dnl Check for the presence of the Google Test library, optionally at a minimum
dnl version, and indicate a viable version with the HAVE_GTEST flag. It defines
dnl standard variables for substitution including GTEST_CPPFLAGS,
dnl GTEST_CXXFLAGS, GTEST_LDFLAGS, and GTEST_LIBS. It also defines
dnl GTEST_VERSION as the version of Google Test found. Finally, it provides
dnl optional custom action slots in the event GTEST is found or not.
AC_DEFUN([GTEST_LIB_CHECK],
[
dnl Provide a flag to enable or disable Google Test usage.
AC_ARG_ENABLE([gtest],
[AS_HELP_STRING([--enable-gtest],
[Enable tests using the Google C++ Testing Framework.
(Default is enabled.)])],
[],
[enable_gtest=])
AC_ARG_VAR([GTEST_CONFIG],
[The exact path of Google Test's 'gtest-config' script.])
AC_ARG_VAR([GTEST_CPPFLAGS],
[C-like preprocessor flags for Google Test.])
AC_ARG_VAR([GTEST_CXXFLAGS],
[C++ compile flags for Google Test.])
AC_ARG_VAR([GTEST_LDFLAGS],
[Linker path and option flags for Google Test.])
AC_ARG_VAR([GTEST_LIBS],
[Library linking flags for Google Test.])
AC_ARG_VAR([GTEST_VERSION],
[The version of Google Test available.])
HAVE_GTEST="no"
AS_IF([test "x${enable_gtest}" != "xno"],
[AC_MSG_CHECKING([for 'gtest-config'])
AS_IF([test "x${enable_gtest}" = "xyes"],
[AS_IF([test -x "${enable_gtest}/scripts/gtest-config"],
[GTEST_CONFIG="${enable_gtest}/scripts/gtest-config"],
[GTEST_CONFIG="${enable_gtest}/bin/gtest-config"])
AS_IF([test -x "${GTEST_CONFIG}"], [],
[AC_MSG_RESULT([no])
AC_MSG_ERROR([dnl
Unable to locate either a built or installed Google Test.
The specific location '${enable_gtest}' was provided for a built or installed
Google Test, but no 'gtest-config' script could be found at this location.])
])],
[AC_PATH_PROG([GTEST_CONFIG], [gtest-config])])
AS_IF([test -x "${GTEST_CONFIG}"],
[AC_MSG_RESULT([${GTEST_CONFIG}])
m4_ifval([$1],
[_gtest_min_version="--min-version=$1"
AC_MSG_CHECKING([for Google Test at least version >= $1])],
[_gtest_min_version="--min-version=0"
AC_MSG_CHECKING([for Google Test])])
AS_IF([${GTEST_CONFIG} ${_gtest_min_version}],
[AC_MSG_RESULT([yes])
HAVE_GTEST='yes'],
[AC_MSG_RESULT([no])])],
[AC_MSG_RESULT([no])])
AS_IF([test "x${HAVE_GTEST}" = "xyes"],
[GTEST_CPPFLAGS=`${GTEST_CONFIG} --cppflags`
GTEST_CXXFLAGS=`${GTEST_CONFIG} --cxxflags`
GTEST_LDFLAGS=`${GTEST_CONFIG} --ldflags`
GTEST_LIBS=`${GTEST_CONFIG} --libs`
GTEST_VERSION=`${GTEST_CONFIG} --version`
AC_DEFINE([HAVE_GTEST],[1],[Defined when Google Test is available.])],
[AS_IF([test "x${enable_gtest}" = "xyes"],
[AC_MSG_ERROR([dnl
Google Test was enabled, but no viable version could be found.])
])])])
AC_SUBST([HAVE_GTEST])
AM_CONDITIONAL([HAVE_GTEST],[test "x$HAVE_GTEST" = "xyes"])
AS_IF([test "x$HAVE_GTEST" = "xyes"],
[m4_ifval([$2], [$2])],
[m4_ifval([$3], [$3])])
])

View File

@ -0,0 +1,90 @@
// Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "snappy.h"
#include "snappy-c.h"
extern "C" {
snappy_status snappy_compress(const char* input,
size_t input_length,
char* compressed,
size_t *compressed_length) {
if (*compressed_length < snappy_max_compressed_length(input_length)) {
return SNAPPY_BUFFER_TOO_SMALL;
}
snappy::RawCompress(input, input_length, compressed, compressed_length);
return SNAPPY_OK;
}
snappy_status snappy_uncompress(const char* compressed,
size_t compressed_length,
char* uncompressed,
size_t* uncompressed_length) {
size_t real_uncompressed_length;
if (!snappy::GetUncompressedLength(compressed,
compressed_length,
&real_uncompressed_length)) {
return SNAPPY_INVALID_INPUT;
}
if (*uncompressed_length < real_uncompressed_length) {
return SNAPPY_BUFFER_TOO_SMALL;
}
if (!snappy::RawUncompress(compressed, compressed_length, uncompressed)) {
return SNAPPY_INVALID_INPUT;
}
*uncompressed_length = real_uncompressed_length;
return SNAPPY_OK;
}
size_t snappy_max_compressed_length(size_t source_length) {
return snappy::MaxCompressedLength(source_length);
}
snappy_status snappy_uncompressed_length(const char *compressed,
size_t compressed_length,
size_t *result) {
if (snappy::GetUncompressedLength(compressed,
compressed_length,
result)) {
return SNAPPY_OK;
} else {
return SNAPPY_INVALID_INPUT;
}
}
snappy_status snappy_validate_compressed_buffer(const char *compressed,
size_t compressed_length) {
if (snappy::IsValidCompressedBuffer(compressed, compressed_length)) {
return SNAPPY_OK;
} else {
return SNAPPY_INVALID_INPUT;
}
}
} // extern "C"

View File

@ -0,0 +1,138 @@
/*
* Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Plain C interface (a wrapper around the C++ implementation).
*/
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
/*
* Return values; see the documentation for each function to know
* what each can return.
*/
typedef enum {
SNAPPY_OK = 0,
SNAPPY_INVALID_INPUT = 1,
SNAPPY_BUFFER_TOO_SMALL = 2
} snappy_status;
/*
* Takes the data stored in "input[0..input_length-1]" and stores
* it in the array pointed to by "compressed".
*
* <compressed_length> signals the space available in "compressed".
* If it is not at least equal to "snappy_max_compressed_length(input_length)",
* SNAPPY_BUFFER_TOO_SMALL is returned. After successful compression,
* <compressed_length> contains the true length of the compressed output,
* and SNAPPY_OK is returned.
*
* Example:
* size_t output_length = snappy_max_compressed_length(input_length);
* char* output = (char*)malloc(output_length);
* if (snappy_compress(input, input_length, output, &output_length)
* == SNAPPY_OK) {
* ... Process(output, output_length) ...
* }
* free(output);
*/
snappy_status snappy_compress(const char* input,
size_t input_length,
char* compressed,
size_t* compressed_length);
/*
* Given data in "compressed[0..compressed_length-1]" generated by
* calling the snappy_compress routine, this routine stores
* the uncompressed data to
* uncompressed[0..uncompressed_length-1].
* Returns failure (a value not equal to SNAPPY_OK) if the message
* is corrupted and could not be decompressed.
*
* <uncompressed_length> signals the space available in "uncompressed".
* If it is not at least equal to the value returned by
* snappy_uncompressed_length for this stream, SNAPPY_BUFFER_TOO_SMALL
* is returned. After successful decompression, <uncompressed_length>
* contains the true length of the decompressed output.
*
* Example:
* size_t output_length;
* if (snappy_uncompressed_length(input, input_length, &output_length)
* != SNAPPY_OK) {
* ... fail ...
* }
* char* output = (char*)malloc(output_length);
* if (snappy_uncompress(input, input_length, output, &output_length)
* == SNAPPY_OK) {
* ... Process(output, output_length) ...
* }
* free(output);
*/
snappy_status snappy_uncompress(const char* compressed,
size_t compressed_length,
char* uncompressed,
size_t* uncompressed_length);
/*
* Returns the maximal size of the compressed representation of
* input data that is "source_length" bytes in length.
*/
size_t snappy_max_compressed_length(size_t source_length);
/*
* REQUIRES: "compressed[]" was produced by snappy_compress()
* Returns SNAPPY_OK and stores the length of the uncompressed data in
* *result normally. Returns SNAPPY_INVALID_INPUT on parsing error.
* This operation takes O(1) time.
*/
snappy_status snappy_uncompressed_length(const char* compressed,
size_t compressed_length,
size_t* result);
/*
* Check if the contents of "compressed[]" can be uncompressed successfully;
* returns SNAPPY_OK if so, or SNAPPY_INVALID_INPUT if not. Does not return
* the uncompressed data.
* Takes time proportional to compressed_length, but is usually at least a
* factor of four faster than actual decompression.
*/
snappy_status snappy_validate_compressed_buffer(const char* compressed,
size_t compressed_length);
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */

View File

@ -0,0 +1,150 @@
// Copyright 2008 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Internals shared between the Snappy implementation and its unittest.
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
#include "snappy-stubs-internal.h"
namespace snappy {
namespace internal {
class WorkingMemory {
public:
WorkingMemory() : large_table_(NULL) { }
~WorkingMemory() { delete[] large_table_; }
// Allocates and clears a hash table using memory in "*this",
// stores the number of buckets in "*table_size" and returns a pointer to
// the base of the hash table.
uint16* GetHashTable(size_t input_size, int* table_size);
private:
uint16 small_table_[1<<10]; // 2KB
uint16* large_table_; // Allocated only when needed
DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
};
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input_length <= kBlockSize"
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input_length)" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
char* CompressFragment(const char* input,
size_t input_length,
char* op,
uint16* table,
const int table_size);
// Return the largest n such that
//
// s1[0,n-1] == s2[0,n-1]
// and n <= (s2_limit - s2).
//
// Does not read *s2_limit or beyond.
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
//
// Separate implementation for x86_64, for speed. Uses the fact that
// x86_64 is little endian.
#if defined(ARCH_K8)
static inline int FindMatchLength(const char* s1,
const char* s2,
const char* s2_limit) {
assert(s2_limit >= s2);
int matched = 0;
// Find out how long the match is. We loop over the data 64 bits at a
// time until we find a 64-bit block that doesn't match; then we find
// the first non-matching bit and use that to calculate the total
// length of the match.
while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
s2 += 8;
matched += 8;
} else {
// On current (mid-2008) Opteron models there is a 3% more
// efficient code sequence to find the first non-matching byte.
// However, what follows is ~10% better on Intel Core 2 and newer,
// and we expect AMD's bsf instruction to improve.
uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero64(x);
matched += matching_bits >> 3;
return matched;
}
}
while (PREDICT_TRUE(s2 < s2_limit)) {
if (s1[matched] == *s2) {
++s2;
++matched;
} else {
return matched;
}
}
return matched;
}
#else
static inline int FindMatchLength(const char* s1,
const char* s2,
const char* s2_limit) {
// Implementation based on the x86-64 version, above.
assert(s2_limit >= s2);
int matched = 0;
while (s2 <= s2_limit - 4 &&
UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
s2 += 4;
matched += 4;
}
if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero(x);
matched += matching_bits >> 3;
} else {
while ((s2 < s2_limit) && (s1[matched] == *s2)) {
++s2;
++matched;
}
}
return matched;
}
#endif
} // end namespace internal
} // end namespace snappy
#endif // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_

View File

@ -0,0 +1,104 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <string.h>
#include "snappy-sinksource.h"
namespace snappy {
Source::~Source() { }
Sink::~Sink() { }
char* Sink::GetAppendBuffer(size_t length, char* scratch) {
return scratch;
}
char* Sink::GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size) {
*allocated_size = scratch_size;
return scratch;
}
void Sink::AppendAndTakeOwnership(
char* bytes, size_t n,
void (*deleter)(void*, const char*, size_t),
void *deleter_arg) {
Append(bytes, n);
(*deleter)(deleter_arg, bytes, n);
}
ByteArraySource::~ByteArraySource() { }
size_t ByteArraySource::Available() const { return left_; }
const char* ByteArraySource::Peek(size_t* len) {
*len = left_;
return ptr_;
}
void ByteArraySource::Skip(size_t n) {
left_ -= n;
ptr_ += n;
}
UncheckedByteArraySink::~UncheckedByteArraySink() { }
void UncheckedByteArraySink::Append(const char* data, size_t n) {
// Do no copying if the caller filled in the result of GetAppendBuffer()
if (data != dest_) {
memcpy(dest_, data, n);
}
dest_ += n;
}
char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
return dest_;
}
void UncheckedByteArraySink::AppendAndTakeOwnership(
char* data, size_t n,
void (*deleter)(void*, const char*, size_t),
void *deleter_arg) {
if (data != dest_) {
memcpy(dest_, data, n);
(*deleter)(deleter_arg, data, n);
}
dest_ += n;
}
char* UncheckedByteArraySink::GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size) {
*allocated_size = desired_size_hint;
return dest_;
}
} // namespace snappy

View File

@ -0,0 +1,182 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
#define THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
#include <stddef.h>
namespace snappy {
// A Sink is an interface that consumes a sequence of bytes.
class Sink {
public:
Sink() { }
virtual ~Sink();
// Append "bytes[0,n-1]" to this.
virtual void Append(const char* bytes, size_t n) = 0;
// Returns a writable buffer of the specified length for appending.
// May return a pointer to the caller-owned scratch buffer which
// must have at least the indicated length. The returned buffer is
// only valid until the next operation on this Sink.
//
// After writing at most "length" bytes, call Append() with the
// pointer returned from this function and the number of bytes
// written. Many Append() implementations will avoid copying
// bytes if this function returned an internal buffer.
//
// If a non-scratch buffer is returned, the caller may only pass a
// prefix of it to Append(). That is, it is not correct to pass an
// interior pointer of the returned array to Append().
//
// The default implementation always returns the scratch buffer.
virtual char* GetAppendBuffer(size_t length, char* scratch);
// For higher performance, Sink implementations can provide custom
// AppendAndTakeOwnership() and GetAppendBufferVariable() methods.
// These methods can reduce the number of copies done during
// compression/decompression.
// Append "bytes[0,n-1] to the sink. Takes ownership of "bytes"
// and calls the deleter function as (*deleter)(deleter_arg, bytes, n)
// to free the buffer. deleter function must be non NULL.
//
// The default implementation just calls Append and frees "bytes".
// Other implementations may avoid a copy while appending the buffer.
virtual void AppendAndTakeOwnership(
char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
void *deleter_arg);
// Returns a writable buffer for appending and writes the buffer's capacity to
// *allocated_size. Guarantees *allocated_size >= min_size.
// May return a pointer to the caller-owned scratch buffer which must have
// scratch_size >= min_size.
//
// The returned buffer is only valid until the next operation
// on this ByteSink.
//
// After writing at most *allocated_size bytes, call Append() with the
// pointer returned from this function and the number of bytes written.
// Many Append() implementations will avoid copying bytes if this function
// returned an internal buffer.
//
// If the sink implementation allocates or reallocates an internal buffer,
// it should use the desired_size_hint if appropriate. If a caller cannot
// provide a reasonable guess at the desired capacity, it should set
// desired_size_hint = 0.
//
// If a non-scratch buffer is returned, the caller may only pass
// a prefix of it to Append(). That is, it is not correct to pass an
// interior pointer to Append().
//
// The default implementation always returns the scratch buffer.
virtual char* GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size);
private:
// No copying
Sink(const Sink&);
void operator=(const Sink&);
};
// A Source is an interface that yields a sequence of bytes
class Source {
public:
Source() { }
virtual ~Source();
// Return the number of bytes left to read from the source
virtual size_t Available() const = 0;
// Peek at the next flat region of the source. Does not reposition
// the source. The returned region is empty iff Available()==0.
//
// Returns a pointer to the beginning of the region and stores its
// length in *len.
//
// The returned region is valid until the next call to Skip() or
// until this object is destroyed, whichever occurs first.
//
// The returned region may be larger than Available() (for example
// if this ByteSource is a view on a substring of a larger source).
// The caller is responsible for ensuring that it only reads the
// Available() bytes.
virtual const char* Peek(size_t* len) = 0;
// Skip the next n bytes. Invalidates any buffer returned by
// a previous call to Peek().
// REQUIRES: Available() >= n
virtual void Skip(size_t n) = 0;
private:
// No copying
Source(const Source&);
void operator=(const Source&);
};
// A Source implementation that yields the contents of a flat array
class ByteArraySource : public Source {
public:
ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
virtual ~ByteArraySource();
virtual size_t Available() const;
virtual const char* Peek(size_t* len);
virtual void Skip(size_t n);
private:
const char* ptr_;
size_t left_;
};
// A Sink implementation that writes to a flat array without any bound checks.
class UncheckedByteArraySink : public Sink {
public:
explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
virtual ~UncheckedByteArraySink();
virtual void Append(const char* data, size_t n);
virtual char* GetAppendBuffer(size_t len, char* scratch);
virtual char* GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size);
virtual void AppendAndTakeOwnership(
char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
void *deleter_arg);
// Return the current output pointer so that a caller can see how
// many bytes were produced.
// Note: this is not a Sink method.
char* CurrentDestination() const { return dest_; }
private:
char* dest_;
};
} // namespace snappy
#endif // THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
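// Editorial sketch, not part of the vendored file: a minimal custom Sink
// that appends all output to a std::string, the kind of Sink one could
// pass to the Source/Sink-based snappy::Uncompress overload.
#include <string>

class StringSink : public snappy::Sink {
 public:
  explicit StringSink(std::string* dest) : dest_(dest) {}
  virtual void Append(const char* bytes, size_t n) { dest_->append(bytes, n); }
 private:
  std::string* dest_;
};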

View File

@ -0,0 +1,42 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <algorithm>
#include <string>
#include "snappy-stubs-internal.h"
namespace snappy {
void Varint::Append32(string* s, uint32 value) {
char buf[Varint::kMax32];
const char* p = Varint::Encode32(buf, value);
s->append(buf, p - buf);
}
} // namespace snappy

View File

@ -0,0 +1,491 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include "snappy-stubs-public.h"
#if defined(__x86_64__)
// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1
#endif
// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
// Pull in std::min, std::ostream, and the likes. This is safe because this
// header file is never used from any public header files.
using namespace std;
// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define PREDICT_FALSE(x) x
#define PREDICT_TRUE(x) x
#endif
// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
bool FLAGS_ ## flag_name = default_value
#define DECLARE_bool(flag_name) \
extern bool FLAGS_ ## flag_name
namespace snappy {
static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// Potentially unaligned loads and stores.
// x86 and PowerPC can simply do these loads and stores native.
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
// ARMv7 and newer support native unaligned accesses, but only of 16-bit
// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
// do an unaligned read and rotate the words around a bit, or do the reads very
// slowly (trip through kernel mode). There's no simple #define that says just
// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
// sub-architectures.
//
// This is a mess, but there's not much we can do about it.
#elif defined(__arm__) && \
!defined(__ARM_ARCH_4__) && \
!defined(__ARM_ARCH_4T__) && \
!defined(__ARM_ARCH_5__) && \
!defined(__ARM_ARCH_5T__) && \
!defined(__ARM_ARCH_5TE__) && \
!defined(__ARM_ARCH_5TEJ__) && \
!defined(__ARM_ARCH_6__) && \
!defined(__ARM_ARCH_6J__) && \
!defined(__ARM_ARCH_6K__) && \
!defined(__ARM_ARCH_6Z__) && \
!defined(__ARM_ARCH_6ZK__) && \
!defined(__ARM_ARCH_6T2__)
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
// TODO(user): NEON supports unaligned 64-bit loads and stores.
// See if that would be more efficient on platforms supporting it,
// at least for copies.
inline uint64 UNALIGNED_LOAD64(const void *p) {
uint64 t;
memcpy(&t, p, sizeof t);
return t;
}
inline void UNALIGNED_STORE64(void *p, uint64 v) {
memcpy(p, &v, sizeof v);
}
#else
// These functions are provided for architectures that don't support
// unaligned loads and stores.
inline uint16 UNALIGNED_LOAD16(const void *p) {
uint16 t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint32 UNALIGNED_LOAD32(const void *p) {
uint32 t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint64 UNALIGNED_LOAD64(const void *p) {
uint64 t;
memcpy(&t, p, sizeof t);
return t;
}
inline void UNALIGNED_STORE16(void *p, uint16 v) {
memcpy(p, &v, sizeof v);
}
inline void UNALIGNED_STORE32(void *p, uint32 v) {
memcpy(p, &v, sizeof v);
}
inline void UNALIGNED_STORE64(void *p, uint64 v) {
memcpy(p, &v, sizeof v);
}
#endif
// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
// on some platforms, in particular ARM.
inline void UnalignedCopy64(const void *src, void *dst) {
if (sizeof(void *) == 8) {
UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
} else {
const char *src_char = reinterpret_cast<const char *>(src);
char *dst_char = reinterpret_cast<char *>(dst);
UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
}
}
// The following guarantees declaration of the byte swap functions.
#ifdef WORDS_BIGENDIAN
#ifdef HAVE_SYS_BYTEORDER_H
#include <sys/byteorder.h>
#endif
#ifdef HAVE_SYS_ENDIAN_H
#include <sys/endian.h>
#endif
#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)
#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)
#elif defined(HAVE_BYTESWAP_H)
#include <byteswap.h>
#elif defined(bswap32)
// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
#define bswap_16(x) bswap16(x)
#define bswap_32(x) bswap32(x)
#define bswap_64(x) bswap64(x)
#elif defined(BSWAP_64)
// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
#define bswap_16(x) BSWAP_16(x)
#define bswap_32(x) BSWAP_32(x)
#define bswap_64(x) BSWAP_64(x)
#else
inline uint16 bswap_16(uint16 x) {
return (x << 8) | (x >> 8);
}
inline uint32 bswap_32(uint32 x) {
x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
return (x >> 16) | (x << 16);
}
inline uint64 bswap_64(uint64 x) {
x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
return (x >> 32) | (x << 32);
}
#endif
#endif // WORDS_BIGENDIAN
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
//
// Store values into unaligned memory converting to little endian order:
// LittleEndian.Store16(p, x);
//
// Load unaligned values stored in little endian converting to host order:
// x = LittleEndian.Load16(p);
class LittleEndian {
public:
// Conversion functions.
#ifdef WORDS_BIGENDIAN
static uint16 FromHost16(uint16 x) { return bswap_16(x); }
static uint16 ToHost16(uint16 x) { return bswap_16(x); }
static uint32 FromHost32(uint32 x) { return bswap_32(x); }
static uint32 ToHost32(uint32 x) { return bswap_32(x); }
static bool IsLittleEndian() { return false; }
#else // !defined(WORDS_BIGENDIAN)
static uint16 FromHost16(uint16 x) { return x; }
static uint16 ToHost16(uint16 x) { return x; }
static uint32 FromHost32(uint32 x) { return x; }
static uint32 ToHost32(uint32 x) { return x; }
static bool IsLittleEndian() { return true; }
#endif // !defined(WORDS_BIGENDIAN)
// Functions to do unaligned loads and stores in little-endian order.
static uint16 Load16(const void *p) {
return ToHost16(UNALIGNED_LOAD16(p));
}
static void Store16(void *p, uint16 v) {
UNALIGNED_STORE16(p, FromHost16(v));
}
static uint32 Load32(const void *p) {
return ToHost32(UNALIGNED_LOAD32(p));
}
static void Store32(void *p, uint32 v) {
UNALIGNED_STORE32(p, FromHost32(v));
}
};
// Some bit-manipulation functions.
class Bits {
public:
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
static int Log2Floor(uint32 n);
// Return the index of the least significant set bit, 0-indexed. Returns an
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
// that it's 0-indexed.
static int FindLSBSetNonZero(uint32 n);
static int FindLSBSetNonZero64(uint64 n);
private:
DISALLOW_COPY_AND_ASSIGN(Bits);
};
#ifdef HAVE_BUILTIN_CTZ
inline int Bits::Log2Floor(uint32 n) {
return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}
inline int Bits::FindLSBSetNonZero(uint32 n) {
return __builtin_ctz(n);
}
inline int Bits::FindLSBSetNonZero64(uint64 n) {
return __builtin_ctzll(n);
}
#else // Portable versions.
inline int Bits::Log2Floor(uint32 n) {
if (n == 0)
return -1;
int log = 0;
uint32 value = n;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
uint32 x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
assert(value == 1);
return log;
}
inline int Bits::FindLSBSetNonZero(uint32 n) {
int rc = 31;
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
const uint32 x = n << shift;
if (x != 0) {
n = x;
rc -= shift;
}
shift >>= 1;
}
return rc;
}
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
const uint32 bottombits = static_cast<uint32>(n);
if (bottombits == 0) {
// Bottom bits are zero, so scan in top bits
return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
} else {
return FindLSBSetNonZero(bottombits);
}
}
#endif // End portable versions.
// Variable-length integer encoding.
class Varint {
public:
// Maximum lengths of varint encoding of uint32.
static const int kMax32 = 5;
// Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
// Never reads a character at or beyond limit. If a valid/terminated varint32
// was found in the range, stores it in *OUTPUT and returns a pointer just
// past the last byte of the varint32. Else returns NULL. On success,
// "result <= limit".
static const char* Parse32WithLimit(const char* ptr, const char* limit,
uint32* OUTPUT);
// REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
// EFFECTS Encodes "v" into "ptr" and returns a pointer to the
// byte just past the last encoded byte.
static char* Encode32(char* ptr, uint32 v);
// EFFECTS Appends the varint representation of "value" to "*s".
static void Append32(string* s, uint32 value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
const char* l,
uint32* OUTPUT) {
const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
uint32 b, result;
if (ptr >= limit) return NULL;
b = *(ptr++); result = b & 127; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
return NULL; // Value is too long to be a varint32
done:
*OUTPUT = result;
return reinterpret_cast<const char*>(ptr);
}
inline char* Varint::Encode32(char* sptr, uint32 v) {
// Operate on characters as unsigneds
unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
static const int B = 128;
if (v < (1<<7)) {
*(ptr++) = v;
} else if (v < (1<<14)) {
*(ptr++) = v | B;
*(ptr++) = v>>7;
} else if (v < (1<<21)) {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = v>>14;
} else if (v < (1<<28)) {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = (v>>14) | B;
*(ptr++) = v>>21;
} else {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = (v>>14) | B;
*(ptr++) = (v>>21) | B;
*(ptr++) = v>>28;
}
return reinterpret_cast<char*>(ptr);
}
// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
return str->empty() ? NULL : &*str->begin();
}
} // namespace snappy
#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
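The varint32 wire format implemented by Varint::Encode32/Parse32WithLimit above is the standard base-128 encoding, so Go's encoding/binary uvarint routines produce identical bytes for values that fit in 32 bits. A small sketch (illustration, not part of this commit):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen32) // 5 bytes, matching kMax32
	n := binary.PutUvarint(buf, 300)           // like Varint::Encode32
	fmt.Printf("% x\n", buf[:n])               // "ac 02"

	v, read := binary.Uvarint(buf[:n]) // like Varint::Parse32WithLimit
	fmt.Println(v, read)               // 300 2
}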

View File

@ -0,0 +1,98 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated
// from snappy-stubs-public.h.in at configure time.
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#if 1
#include <stdint.h>
#endif
#if 1
#include <stddef.h>
#endif
#if 0
#include <sys/uio.h>
#endif
#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 1
#define SNAPPY_PATCHLEVEL 3
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
#include <string>
namespace snappy {
#if 1
typedef int8_t int8;
typedef uint8_t uint8;
typedef int16_t int16;
typedef uint16_t uint16;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
#else
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
#endif
typedef std::string string;
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
#if !0
// Windows does not have an iovec type, yet the concept is universally useful.
// It is simple to define it ourselves, so we put it inside our own namespace.
struct iovec {
void* iov_base;
size_t iov_len;
};
#endif
} // namespace snappy
#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

View File

@ -0,0 +1,98 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated
// from snappy-stubs-public.h.in at configure time.
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#if @ac_cv_have_stdint_h@
#include <stdint.h>
#endif
#if @ac_cv_have_stddef_h@
#include <stddef.h>
#endif
#if @ac_cv_have_sys_uio_h@
#include <sys/uio.h>
#endif
#define SNAPPY_MAJOR @SNAPPY_MAJOR@
#define SNAPPY_MINOR @SNAPPY_MINOR@
#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
#include <string>
namespace snappy {
#if @ac_cv_have_stdint_h@
typedef int8_t int8;
typedef uint8_t uint8;
typedef int16_t int16;
typedef uint16_t uint16;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
#else
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
#endif
typedef std::string string;
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
#if !@ac_cv_have_sys_uio_h@
// Windows does not have an iovec type, yet the concept is universally useful.
// It is simple to define it ourselves, so we put it inside our own namespace.
struct iovec {
void* iov_base;
size_t iov_len;
};
#endif
} // namespace snappy
#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

View File

@ -0,0 +1,609 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_WINDOWS_H
#include <windows.h>
#endif
#include "snappy-test.h"
#include <algorithm>
DEFINE_bool(run_microbenchmarks, true,
"Run microbenchmarks before doing anything else.");
namespace snappy {
string ReadTestDataFile(const string& base, size_t size_limit) {
string contents;
const char* srcdir = getenv("srcdir"); // This is set by Automake.
string prefix;
if (srcdir) {
prefix = string(srcdir) + "/";
}
file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
).CheckSuccess();
if (size_limit > 0) {
contents = contents.substr(0, size_limit);
}
return contents;
}
string ReadTestDataFile(const string& base) {
return ReadTestDataFile(base, 0);
}
string StringPrintf(const char* format, ...) {
char buf[4096];
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
return buf;
}
bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;
void ResetBenchmarkTiming() {
benchmark_real_time_us = 0;
benchmark_cpu_time_us = 0;
}
#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif // WIN32
void StartBenchmarkTiming() {
#ifdef WIN32
QueryPerformanceCounter(&benchmark_start_real);
FILETIME dummy;
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
gettimeofday(&benchmark_start_real, NULL);
if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
perror("getrusage(RUSAGE_SELF)");
exit(1);
}
#endif
benchmark_running = true;
}
void StopBenchmarkTiming() {
if (!benchmark_running) {
return;
}
#ifdef WIN32
LARGE_INTEGER benchmark_stop_real;
LARGE_INTEGER benchmark_frequency;
QueryPerformanceCounter(&benchmark_stop_real);
QueryPerformanceFrequency(&benchmark_frequency);
double elapsed_real = static_cast<double>(
benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
benchmark_frequency.QuadPart;
benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
FILETIME benchmark_stop_cpu, dummy;
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
ULARGE_INTEGER start_ulargeint;
start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
ULARGE_INTEGER stop_ulargeint;
stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
benchmark_cpu_time_us +=
(stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else // WIN32
struct timeval benchmark_stop_real;
gettimeofday(&benchmark_stop_real, NULL);
benchmark_real_time_us +=
1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
benchmark_real_time_us +=
(benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
struct rusage benchmark_stop_cpu;
if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
perror("getrusage(RUSAGE_SELF)");
exit(1);
}
benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
benchmark_start_cpu.ru_utime.tv_sec);
benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
benchmark_start_cpu.ru_utime.tv_usec);
#endif // WIN32
benchmark_running = false;
}
void SetBenchmarkLabel(const string& str) {
if (benchmark_label) {
delete benchmark_label;
}
benchmark_label = new string(str);
}
void SetBenchmarkBytesProcessed(int64 bytes) {
benchmark_bytes_processed = bytes;
}
struct BenchmarkRun {
int64 real_time_us;
int64 cpu_time_us;
};
struct BenchmarkCompareCPUTime {
bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
return a.cpu_time_us < b.cpu_time_us;
}
};
void Benchmark::Run() {
for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
// Run a few iterations first to find out approximately how fast
// the benchmark is.
const int kCalibrateIterations = 100;
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(kCalibrateIterations, test_case_num);
StopBenchmarkTiming();
// Let each test case run for about 200ms, but for at least as many
// iterations as we used to calibrate.
// Run five times and pick the median.
const int kNumRuns = 5;
const int kMedianPos = kNumRuns / 2;
int num_iterations = 0;
if (benchmark_real_time_us > 0) {
num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
}
num_iterations = max(num_iterations, kCalibrateIterations);
BenchmarkRun benchmark_runs[kNumRuns];
for (int run = 0; run < kNumRuns; ++run) {
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(num_iterations, test_case_num);
StopBenchmarkTiming();
benchmark_runs[run].real_time_us = benchmark_real_time_us;
benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
}
string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
string human_readable_speed;
nth_element(benchmark_runs,
benchmark_runs + kMedianPos,
benchmark_runs + kNumRuns,
BenchmarkCompareCPUTime());
int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
if (cpu_time_us <= 0) {
human_readable_speed = "?";
} else {
int64 bytes_per_second =
benchmark_bytes_processed * 1000000 / cpu_time_us;
if (bytes_per_second < 1024) {
human_readable_speed = StringPrintf("%dB/s", static_cast<int>(bytes_per_second));
} else if (bytes_per_second < 1024 * 1024) {
human_readable_speed = StringPrintf(
"%.1fkB/s", bytes_per_second / 1024.0f);
} else if (bytes_per_second < 1024 * 1024 * 1024) {
human_readable_speed = StringPrintf(
"%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
} else {
human_readable_speed = StringPrintf(
"%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
}
}
fprintf(stderr,
#ifdef WIN32
"%-18s %10I64d %10I64d %10d %s %s\n",
#else
"%-18s %10lld %10lld %10d %s %s\n",
#endif
heading.c_str(),
static_cast<long long>(real_time_us * 1000 / num_iterations),
static_cast<long long>(cpu_time_us * 1000 / num_iterations),
num_iterations,
human_readable_speed.c_str(),
benchmark_label->c_str());
}
}
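The calibration rule in Benchmark::Run above is easy to miss: scale the iteration count so a measured run lasts about 200ms, never drop below the calibration count, then report the median of five runs by CPU time. A standalone Go sketch of just that arithmetic (assumed names, illustration only):

package main

import (
	"fmt"
	"sort"
)

// targetIterations mirrors the num_iterations computation in Benchmark::Run.
func targetIterations(calibrateIters int, calibrateRealTimeUs int64) int {
	n := 0
	if calibrateRealTimeUs > 0 {
		n = int(200000 * int64(calibrateIters) / calibrateRealTimeUs)
	}
	if n < calibrateIters {
		n = calibrateIters
	}
	return n
}

// medianCPUTimeUs picks the median run, as nth_element does above.
func medianCPUTimeUs(runsUs []int64) int64 {
	sort.Slice(runsUs, func(i, j int) bool { return runsUs[i] < runsUs[j] })
	return runsUs[len(runsUs)/2]
}

func main() {
	fmt.Println(targetIterations(100, 5000))              // 100 iters took 5ms -> 4000
	fmt.Println(medianCPUTimeUs([]int64{9, 7, 8, 30, 6})) // 8
}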
#ifdef HAVE_LIBZ
ZLib::ZLib()
: comp_init_(false),
uncomp_init_(false) {
Reinit();
}
ZLib::~ZLib() {
if (comp_init_) { deflateEnd(&comp_stream_); }
if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}
void ZLib::Reinit() {
compression_level_ = Z_DEFAULT_COMPRESSION;
window_bits_ = MAX_WBITS;
mem_level_ = 8; // DEF_MEM_LEVEL
if (comp_init_) {
deflateEnd(&comp_stream_);
comp_init_ = false;
}
if (uncomp_init_) {
inflateEnd(&uncomp_stream_);
uncomp_init_ = false;
}
first_chunk_ = true;
}
void ZLib::Reset() {
first_chunk_ = true;
}
// --------- COMPRESS MODE
// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
deflateEnd(&comp_stream_);
comp_init_ = false;
Reset();
}
int ZLib::DeflateInit() {
return deflateInit2(&comp_stream_,
compression_level_,
Z_DEFLATED,
window_bits_,
mem_level_,
Z_DEFAULT_STRATEGY);
}
int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
int err;
comp_stream_.next_in = (Bytef*)source;
comp_stream_.avail_in = (uInt)*sourceLen;
if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
comp_stream_.next_out = dest;
comp_stream_.avail_out = (uInt)*destLen;
if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
if ( !first_chunk_ ) // only need to set up stream the first time through
return Z_OK;
if (comp_init_) { // we've already initted it
err = deflateReset(&comp_stream_);
if (err != Z_OK) {
LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
deflateEnd(&comp_stream_);
comp_init_ = false;
}
}
if (!comp_init_) { // first use
comp_stream_.zalloc = (alloc_func)0;
comp_stream_.zfree = (free_func)0;
comp_stream_.opaque = (voidpf)0;
err = DeflateInit();
if (err != Z_OK) return err;
comp_init_ = true;
}
return Z_OK;
}
// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress(). Alas, we
// want to do chunked compression on our webserver. In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer. Thus we need to
// use the chunked compression features of zlib.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
int err;
if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
return err;
// This is used to figure out how many bytes we wrote *this chunk*
int compressed_size = comp_stream_.total_out;
// Some setup happens only for the first chunk we compress in a run
if ( first_chunk_ ) {
first_chunk_ = false;
}
// flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
// compression.
err = deflate(&comp_stream_, flush_mode);
*sourceLen = comp_stream_.avail_in;
if ((err == Z_STREAM_END || err == Z_OK)
&& comp_stream_.avail_in == 0
&& comp_stream_.avail_out != 0 ) {
// we processed everything ok and the output buffer was large enough.
;
} else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
return Z_BUF_ERROR; // should never happen
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
// an error happened
CompressErrorInit();
return err;
} else if (comp_stream_.avail_out == 0) { // not enough space
err = Z_BUF_ERROR;
}
assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
if (err == Z_STREAM_END)
err = Z_OK;
// update the crc and other metadata
compressed_size = comp_stream_.total_out - compressed_size; // delta
*destLen = compressed_size;
return err;
}
int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
const int ret =
CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
if (ret == Z_BUF_ERROR)
CompressErrorInit();
return ret;
}
// This routine only initializes the compression stream once. Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen) {
int err;
if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
Z_FINISH)) != Z_OK )
return err;
Reset(); // reset for next call to Compress
return Z_OK;
}
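The chunked-compression pattern described above (compress a piece, flush a decodable boundary, continue, finish) maps directly onto Go's compress/flate, where Flush plays the role of deflate(..., Z_SYNC_FLUSH) and Close the role of Z_FINISH. A rough sketch, for illustration only:

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io/ioutil"
)

func main() {
	var buf bytes.Buffer
	zw, _ := flate.NewWriter(&buf, flate.BestSpeed)

	zw.Write([]byte("header\n"))
	zw.Flush() // chunk boundary: bytes so far are independently decodable

	zw.Write([]byte("body\n"))
	zw.Close() // terminate the stream, like Z_FINISH

	zr := flate.NewReader(&buf)
	out, _ := ioutil.ReadAll(zr)
	fmt.Printf("%s", out) // header + body round-trip intact
}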
// --------- UNCOMPRESS MODE
int ZLib::InflateInit() {
return inflateInit2(&uncomp_stream_, MAX_WBITS);
}
// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
inflateEnd(&uncomp_stream_);
uncomp_init_ = false;
Reset();
}
int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
int err;
uncomp_stream_.next_in = (Bytef*)source;
uncomp_stream_.avail_in = (uInt)*sourceLen;
// Check for source > 64K on 16-bit machine:
if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
uncomp_stream_.next_out = dest;
uncomp_stream_.avail_out = (uInt)*destLen;
if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
if ( !first_chunk_ ) // only need to set up stream the first time through
return Z_OK;
if (uncomp_init_) { // we've already initted it
err = inflateReset(&uncomp_stream_);
if (err != Z_OK) {
LOG(WARNING)
<< "ERROR: Can't reset uncompress object; creating a new one";
UncompressErrorInit();
}
}
if (!uncomp_init_) {
uncomp_stream_.zalloc = (alloc_func)0;
uncomp_stream_.zfree = (free_func)0;
uncomp_stream_.opaque = (voidpf)0;
err = InflateInit();
if (err != Z_OK) return err;
uncomp_init_ = true;
}
return Z_OK;
}
// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
int err = Z_OK;
if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
<< *sourceLen;
return err;
}
// This is used to figure out how many output bytes we wrote *this chunk*:
const uLong old_total_out = uncomp_stream_.total_out;
// This is used to figure out how many input bytes we read *this chunk*:
const uLong old_total_in = uncomp_stream_.total_in;
// Some setup happens only for the first chunk we compress in a run
if ( first_chunk_ ) {
first_chunk_ = false; // so we don't do this again
// For the first chunk *only* (to avoid infinite troubles), we let
// there be no actual data to uncompress. This sometimes triggers
// when the input is only the gzip header, say.
if ( *sourceLen == 0 ) {
*destLen = 0;
return Z_OK;
}
}
// We'll uncompress as much as we can. If we end OK great, otherwise
// if we get an error that seems to be the gzip footer, we store the
// gzip footer and return OK, otherwise we return the error.
// flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
err = inflate(&uncomp_stream_, flush_mode);
// Figure out how many bytes of the input zlib slurped up:
const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
CHECK_LE(source + bytes_read, source + *sourceLen);
*sourceLen = uncomp_stream_.avail_in;
if ((err == Z_STREAM_END || err == Z_OK) // everything went ok
&& uncomp_stream_.avail_in == 0) { // and we read it all
;
} else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
LOG(WARNING)
<< "UncompressChunkOrAll: Received some extra data, bytes total: "
<< uncomp_stream_.avail_in << " bytes: "
<< string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
min(int(uncomp_stream_.avail_in), 20));
UncompressErrorInit();
return Z_DATA_ERROR; // what's the extra data for?
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
// an error happened
LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
<< " avail_out: " << uncomp_stream_.avail_out;
UncompressErrorInit();
return err;
} else if (uncomp_stream_.avail_out == 0) {
err = Z_BUF_ERROR;
}
assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
if (err == Z_STREAM_END)
err = Z_OK;
*destLen = uncomp_stream_.total_out - old_total_out; // size for this call
return err;
}
int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
const int ret =
UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
if (ret == Z_BUF_ERROR)
UncompressErrorInit();
return ret;
}
int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}
// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks. We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
assert(!first_chunk_ && uncomp_init_);
// Make sure we're at the end-of-compressed-data point. This means
// if we call inflate with Z_FINISH we won't consume any input or
// write any output
Bytef dummyin, dummyout;
uLongf dummylen = 0;
if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
!= Z_OK ) {
return false;
}
// Make sure that when we exit, we can start a new round of chunks later
Reset();
return true;
}
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once. Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen) {
int err;
if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
Z_FINISH)) != Z_OK ) {
Reset(); // let us try to compress again
return err;
}
if ( !UncompressChunkDone() ) // calls Reset()
return Z_DATA_ERROR;
return Z_OK; // stream_end is ok
}
#endif // HAVE_LIBZ
} // namespace snappy

View File

@ -0,0 +1,597 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
#include <iostream>
#include <string>
#include "snappy-stubs-internal.h"
#include <stdio.h>
#include <stdarg.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_WINDOWS_H
#include <windows.h>
#endif
#include <string>
#ifdef HAVE_GTEST
#include <gtest/gtest.h>
#undef TYPED_TEST
#define TYPED_TEST TEST
#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
#else
// Stubs for if the user doesn't have Google Test installed.
#define TEST(test_case, test_subcase) \
void Test_ ## test_case ## _ ## test_subcase()
#define INIT_GTEST(argc, argv)
#define TYPED_TEST TEST
#define EXPECT_EQ CHECK_EQ
#define EXPECT_NE CHECK_NE
#define EXPECT_FALSE(cond) CHECK(!(cond))
#endif
#ifdef HAVE_GFLAGS
#include <gflags/gflags.h>
// This is tricky; both gflags and Google Test want to look at the command line
// arguments. Google Test seems to be the most happy with unknown arguments,
// though, so we call it first and hope for the best.
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv); \
google::ParseCommandLineFlags(argc, argv, remove_flags);
#else
// If we don't have the gflags package installed, these can only be
// changed at compile time.
#define DEFINE_int32(flag_name, default_value, description) \
static int FLAGS_ ## flag_name = default_value;
// Also stub DEFINE_bool, which snappy-test.cc needs for run_microbenchmarks.
#define DEFINE_bool(flag_name, default_value, description) \
static bool FLAGS_ ## flag_name = default_value;
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv)
#endif
#ifdef HAVE_LIBZ
#include "zlib.h"
#endif
#ifdef HAVE_LIBLZO2
#include "lzo/lzo1x.h"
#endif
#ifdef HAVE_LIBLZF
extern "C" {
#include "lzf.h"
}
#endif
#ifdef HAVE_LIBFASTLZ
#include "fastlz.h"
#endif
#ifdef HAVE_LIBQUICKLZ
#include "quicklz.h"
#endif
namespace {
namespace File {
void Init() { }
} // namespace File
namespace file {
int Defaults() { return 0; }
class DummyStatus {
public:
void CheckSuccess() { }
};
DummyStatus GetContents(const string& filename, string* data, int unused) {
FILE* fp = fopen(filename.c_str(), "rb");
if (fp == NULL) {
perror(filename.c_str());
exit(1);
}
data->clear();
while (!feof(fp)) {
char buf[4096];
size_t ret = fread(buf, 1, 4096, fp);
if (ret == 0 && ferror(fp)) {
perror("fread");
exit(1);
}
data->append(string(buf, ret));
}
fclose(fp);
return DummyStatus();
}
DummyStatus SetContents(const string& filename,
const string& str,
int unused) {
FILE* fp = fopen(filename.c_str(), "wb");
if (fp == NULL) {
perror(filename.c_str());
exit(1);
}
int ret = fwrite(str.data(), str.size(), 1, fp);
if (ret != 1) {
perror("fwrite");
exit(1);
}
fclose(fp);
return DummyStatus();
}
} // namespace file
} // namespace
namespace snappy {
#define FLAGS_test_random_seed 301
typedef string TypeParam;
void Test_CorruptedTest_VerifyCorrupted();
void Test_Snappy_SimpleTests();
void Test_Snappy_MaxBlowup();
void Test_Snappy_RandomData();
void Test_Snappy_FourByteOffset();
void Test_SnappyCorruption_TruncatedVarint();
void Test_SnappyCorruption_UnterminatedVarint();
void Test_Snappy_ReadPastEndOfBuffer();
void Test_Snappy_FindMatchLength();
void Test_Snappy_FindMatchLengthRandom();
string ReadTestDataFile(const string& base, size_t size_limit);
string ReadTestDataFile(const string& base);
// A sprintf() variant that returns a std::string.
// Not safe for general use due to truncation issues.
string StringPrintf(const char* format, ...);
// A simple, non-cryptographically-secure random generator.
class ACMRandom {
public:
explicit ACMRandom(uint32 seed) : seed_(seed) {}
int32 Next();
int32 Uniform(int32 n) {
return Next() % n;
}
uint8 Rand8() {
return static_cast<uint8>((Next() >> 1) & 0x000000ff);
}
bool OneIn(int X) { return Uniform(X) == 0; }
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with bias towards smaller numbers.
int32 Skewed(int max_log);
private:
static const uint32 M = 2147483647L; // 2^31-1
uint32 seed_;
};
inline int32 ACMRandom::Next() {
static const uint64 A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
// seed_ must not be zero or M, or else all subsequent computed values
// will be zero or M respectively. For all other values, seed_ will end
// up cycling through every number in [1,M-1]
uint64 product = seed_ * A;
// Compute (product % M) using the fact that ((x << 31) % M) == x.
seed_ = (product >> 31) + (product & M);
// The first reduction may overflow by 1 bit, so we may need to repeat.
// mod == M is not possible; using > allows the faster sign-bit-based test.
if (seed_ > M) {
seed_ -= M;
}
return seed_;
}
inline int32 ACMRandom::Skewed(int max_log) {
const int32 base = (Next() - 1) % (max_log+1);
return (Next() - 1) & ((1u << base)-1);
}
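ACMRandom::Next() above is a Lehmer generator, seed = seed * 16807 mod (2^31 - 1), with the division replaced by the identity ((x << 31) mod M) == x. A hypothetical Go port that keeps the same bit tricks:

package main

import "fmt"

const m = 2147483647 // M = 2^31 - 1

// next reproduces ACMRandom::Next(): product = seed * A, then reduce
// mod M via (product >> 31) + (product & M), fixing up one overflow.
func next(seed uint32) uint32 {
	product := uint64(seed) * 16807
	s := uint32(product>>31) + uint32(product&m)
	if s > m { // the first reduction may overshoot by at most one M
		s -= m
	}
	return s
}

func main() {
	s := uint32(301) // same default as FLAGS_test_random_seed
	for i := 0; i < 3; i++ {
		s = next(s)
		fmt.Println(s)
	}
}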
// A wall-time clock. This stub is not super-accurate, nor resistant to the
// system time changing.
class CycleTimer {
public:
CycleTimer() : real_time_us_(0) {}
void Start() {
#ifdef WIN32
QueryPerformanceCounter(&start_);
#else
gettimeofday(&start_, NULL);
#endif
}
void Stop() {
#ifdef WIN32
LARGE_INTEGER stop;
LARGE_INTEGER frequency;
QueryPerformanceCounter(&stop);
QueryPerformanceFrequency(&frequency);
double elapsed = static_cast<double>(stop.QuadPart - start_.QuadPart) /
frequency.QuadPart;
real_time_us_ += elapsed * 1e6 + 0.5;
#else
struct timeval stop;
gettimeofday(&stop, NULL);
real_time_us_ += 1000000 * (stop.tv_sec - start_.tv_sec);
real_time_us_ += (stop.tv_usec - start_.tv_usec);
#endif
}
double Get() {
return real_time_us_ * 1e-6;
}
private:
int64 real_time_us_;
#ifdef WIN32
LARGE_INTEGER start_;
#else
struct timeval start_;
#endif
};
// Minimalistic microbenchmark framework.
typedef void (*BenchmarkFunction)(int, int);
class Benchmark {
public:
Benchmark(const string& name, BenchmarkFunction function) :
name_(name), function_(function) {}
Benchmark* DenseRange(int start, int stop) {
start_ = start;
stop_ = stop;
return this;
}
void Run();
private:
const string name_;
const BenchmarkFunction function_;
int start_, stop_;
};
#define BENCHMARK(benchmark_name) \
Benchmark* Benchmark_ ## benchmark_name = \
(new Benchmark(#benchmark_name, benchmark_name))
extern Benchmark* Benchmark_BM_UFlat;
extern Benchmark* Benchmark_BM_UIOVec;
extern Benchmark* Benchmark_BM_UValidate;
extern Benchmark* Benchmark_BM_ZFlat;
void ResetBenchmarkTiming();
void StartBenchmarkTiming();
void StopBenchmarkTiming();
void SetBenchmarkLabel(const string& str);
void SetBenchmarkBytesProcessed(int64 bytes);
#ifdef HAVE_LIBZ
// Object-oriented wrapper around zlib.
class ZLib {
public:
ZLib();
~ZLib();
// Wipe a ZLib object to a virgin state. This differs from Reset()
// in that it also breaks any state.
void Reinit();
// Call this to make a zlib buffer as good as new. Here's the only
// case where they differ:
// CompressChunk(a); CompressChunk(b); CompressChunkDone(); vs
// CompressChunk(a); Reset(); CompressChunk(b); CompressChunkDone();
// You'll want to use Reset(), then, when you interrupt a compress
// (or uncompress) in the middle of a chunk and want to start over.
void Reset();
// According to the zlib manual, when you Compress, the destination
// buffer must have size at least src + .1%*src + 12. This function
// helps you calculate that. Augment this to account for a potential
// gzip header and footer, plus a few bytes of slack.
static int MinCompressbufSize(int uncompress_size) {
return uncompress_size + uncompress_size/1000 + 40;
}
// Compresses the source buffer into the destination buffer.
// sourceLen is the byte length of the source buffer.
// Upon entry, destLen is the total size of the destination buffer,
// which must be of size at least MinCompressbufSize(sourceLen).
// Upon exit, destLen is the actual size of the compressed buffer.
//
// This function can be used to compress a whole file at once if the
// input file is mmap'ed.
//
// Returns Z_OK if success, Z_MEM_ERROR if there was not
// enough memory, Z_BUF_ERROR if there was not enough room in the
// output buffer. Note that if the output buffer is exactly the same
// size as the compressed result, we still return Z_BUF_ERROR.
// (check CL#1936076)
int Compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int Uncompress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
// Uncompress data one chunk at a time -- ie you can call this
// more than once. To get this to work you need to call per-chunk
// and "done" routines.
//
// Returns Z_OK if success, Z_MEM_ERROR if there was not
// enough memory, Z_BUF_ERROR if there was not enough room in the
// output buffer.
int UncompressAtMost(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
// Checks gzip footer information, as needed. Mostly this just
// makes sure the checksums match. Whenever you call this, it
// will assume the last 8 bytes from the previous UncompressChunk
// call are the footer. Returns true iff everything looks ok.
bool UncompressChunkDone();
private:
int InflateInit(); // sets up the zlib inflate structure
int DeflateInit(); // sets up the zlib deflate structure
// These init the zlib data structures for compressing/uncompressing
int CompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
int UncompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void UncompressErrorInit();
// Helper function for Compress
int CompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode);
int CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode);
// Likewise, helper functions for Uncompress
int UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode);
int UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode);
// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before
// returning the error.
void CompressErrorInit();
int compression_level_; // compression level
int window_bits_; // log base 2 of the window size used in compression
int mem_level_; // specifies the amount of memory to be used by
// compressor (1-9)
z_stream comp_stream_; // Zlib stream data structure
bool comp_init_; // True if we have initialized comp_stream_
z_stream uncomp_stream_; // Zlib stream data structure
bool uncomp_init_; // True if we have initialized uncomp_stream_
// These are used only with chunked compression.
bool first_chunk_; // true if we need to emit headers with this chunk
};
#endif // HAVE_LIBZ
} // namespace snappy
DECLARE_bool(run_microbenchmarks);
static void RunSpecifiedBenchmarks() {
if (!FLAGS_run_microbenchmarks) {
return;
}
fprintf(stderr, "Running microbenchmarks.\n");
#ifndef NDEBUG
fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
#endif
#ifndef __OPTIMIZE__
fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
#endif
fprintf(stderr, "Benchmark Time(ns) CPU(ns) Iterations\n");
fprintf(stderr, "---------------------------------------------------\n");
snappy::Benchmark_BM_UFlat->Run();
snappy::Benchmark_BM_UIOVec->Run();
snappy::Benchmark_BM_UValidate->Run();
snappy::Benchmark_BM_ZFlat->Run();
fprintf(stderr, "\n");
}
#ifndef HAVE_GTEST
static inline int RUN_ALL_TESTS() {
fprintf(stderr, "Running correctness tests.\n");
snappy::Test_CorruptedTest_VerifyCorrupted();
snappy::Test_Snappy_SimpleTests();
snappy::Test_Snappy_MaxBlowup();
snappy::Test_Snappy_RandomData();
snappy::Test_Snappy_FourByteOffset();
snappy::Test_SnappyCorruption_TruncatedVarint();
snappy::Test_SnappyCorruption_UnterminatedVarint();
snappy::Test_Snappy_ReadPastEndOfBuffer();
snappy::Test_Snappy_FindMatchLength();
snappy::Test_Snappy_FindMatchLengthRandom();
fprintf(stderr, "All tests passed.\n");
return 0;
}
#endif // HAVE_GTEST
// For main().
namespace snappy {
static void CompressFile(const char* fname);
static void UncompressFile(const char* fname);
static void MeasureFile(const char* fname);
// Logging.
#define LOG(level) LogMessage()
#define VLOG(level) true ? (void)0 : \
snappy::LogMessageVoidify() & snappy::LogMessage()
class LogMessage {
public:
LogMessage() { }
~LogMessage() {
cerr << endl;
}
LogMessage& operator<<(const std::string& msg) {
cerr << msg;
return *this;
}
LogMessage& operator<<(int x) {
cerr << x;
return *this;
}
};
// Asserts, both versions activated in debug mode only,
// and ones that are always active.
#define CRASH_UNLESS(condition) \
PREDICT_TRUE(condition) ? (void)0 : \
snappy::LogMessageVoidify() & snappy::LogMessageCrash()
#ifdef _MSC_VER
// ~LogMessageCrash calls abort() and therefore never returns. This is by
// design, so temporarily disable warning C4722 (destructor never returns).
#pragma warning(push)
#pragma warning(disable:4722)
#endif
class LogMessageCrash : public LogMessage {
public:
LogMessageCrash() { }
~LogMessageCrash() {
cerr << endl;
abort();
}
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".
class LogMessageVoidify {
public:
LogMessageVoidify() { }
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(const LogMessage&) { }
};
#define CHECK(cond) CRASH_UNLESS(cond)
#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
#define CHECK_OK(cond) (cond).CheckSuccess()
} // namespace snappy
using snappy::CompressFile;
using snappy::UncompressFile;
using snappy::MeasureFile;
#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_

1553
vendor/github.com/cockroachdb/c-snappy/internal/snappy.cc generated vendored Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,203 @@
// Copyright 2005 and onwards Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// A light-weight compression algorithm. It is designed for speed of
// compression and decompression, rather than for the utmost in space
// savings.
//
// For getting better compression ratios when you are compressing data
// with long repeated sequences or compressing data that is similar to
// other data, while still compressing fast, you might look at first
// using BMDiff and then compressing the output of BMDiff with
// Snappy.
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
#define THIRD_PARTY_SNAPPY_SNAPPY_H__
#include <stddef.h>
#include <string>
#include "snappy-stubs-public.h"
namespace snappy {
class Source;
class Sink;
// ------------------------------------------------------------------------
// Generic compression/decompression routines.
// ------------------------------------------------------------------------
// Compress the bytes read from "*source" and append to "*sink". Return the
// number of bytes written.
size_t Compress(Source* source, Sink* sink);
// Find the uncompressed length of the given stream, as given by the header.
// Note that the true length could deviate from this; the stream could e.g.
// be truncated.
//
// Also note that this leaves "*source" in a state that is unsuitable for
// further operations, such as RawUncompress(). You will need to rewind
// or recreate the source yourself before attempting any further calls.
bool GetUncompressedLength(Source* source, uint32* result);
// ------------------------------------------------------------------------
// Higher-level string based routines (should be sufficient for most users)
// ------------------------------------------------------------------------
// Sets "*output" to the compressed version of "input[0,input_length-1]".
// Original contents of *output are lost.
//
// REQUIRES: "input[]" is not an alias of "*output".
size_t Compress(const char* input, size_t input_length, string* output);
// Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
// Original contents of "*uncompressed" are lost.
//
// REQUIRES: "compressed[]" is not an alias of "*uncompressed".
//
// returns false if the message is corrupted and could not be decompressed
bool Uncompress(const char* compressed, size_t compressed_length,
string* uncompressed);
// Decompresses "compressed" to "*uncompressed".
//
// returns false if the message is corrupted and could not be decompressed
bool Uncompress(Source* compressed, Sink* uncompressed);
// This routine uncompresses as much of the "compressed" as possible
// into sink. It returns the number of valid bytes added to sink
// (extra invalid bytes may have been added due to errors; the caller
// should ignore those). The emitted data typically has length
// GetUncompressedLength(), but may be shorter if an error is
// encountered.
size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed);
// ------------------------------------------------------------------------
// Lower-level character array based routines. May be useful for
// efficiency reasons in certain circumstances.
// ------------------------------------------------------------------------
// REQUIRES: "compressed" must point to an area of memory that is at
// least "MaxCompressedLength(input_length)" bytes in length.
//
// Takes the data stored in "input[0..input_length-1]" and stores
// it in the array pointed to by "compressed".
//
// "*compressed_length" is set to the length of the compressed output.
//
// Example:
// char* output = new char[snappy::MaxCompressedLength(input_length)];
// size_t output_length;
// RawCompress(input, input_length, output, &output_length);
// ... Process(output, output_length) ...
// delete [] output;
void RawCompress(const char* input,
size_t input_length,
char* compressed,
size_t* compressed_length);
// Given data in "compressed[0..compressed_length-1]" generated by
// calling the Snappy::Compress routine, this routine
// stores the uncompressed data to
// uncompressed[0..GetUncompressedLength(compressed)-1]
// returns false if the message is corrupted and could not be decompressed
bool RawUncompress(const char* compressed, size_t compressed_length,
char* uncompressed);
// Given data from the byte source 'compressed' generated by calling
// the Snappy::Compress routine, this routine stores the uncompressed
// data to
// uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
// returns false if the message is corrupted and could not be decompressed
bool RawUncompress(Source* compressed, char* uncompressed);
// Given data in "compressed[0..compressed_length-1]" generated by
// calling the Snappy::Compress routine, this routine
// stores the uncompressed data to the iovec "iov". The number of physical
// buffers in "iov" is given by iov_cnt and their cumulative size
// must be at least GetUncompressedLength(compressed). The individual buffers
// in "iov" must not overlap with each other.
//
// returns false if the message is corrupted and could not be decompressed
bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
const struct iovec* iov, size_t iov_cnt);
// Given data from the byte source 'compressed' generated by calling
// the Snappy::Compress routine, this routine stores the uncompressed
// data to the iovec "iov". The number of physical
// buffers in "iov" is given by iov_cnt and their cumulative size
// must be at least GetUncompressedLength(compressed). The individual buffers
// in "iov" must not overlap with each other.
//
// returns false if the message is corrupted and could not be decompressed
bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
size_t iov_cnt);
// Returns the maximal size of the compressed representation of
// input data that is "source_bytes" bytes in length;
size_t MaxCompressedLength(size_t source_bytes);
// REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
// Returns true and stores the length of the uncompressed data in
// *result normally. Returns false on parsing error.
// This operation takes O(1) time.
bool GetUncompressedLength(const char* compressed, size_t compressed_length,
size_t* result);
// Returns true iff the contents of "compressed[]" can be uncompressed
// successfully. Does not return the uncompressed data. Takes
// time proportional to compressed_length, but is usually at least
// a factor of four faster than actual decompression.
bool IsValidCompressedBuffer(const char* compressed,
size_t compressed_length);
// Returns true iff the contents of "compressed" can be uncompressed
// successfully. Does not return the uncompressed data. Takes
// time proportional to *compressed length, but is usually at least
// a factor of four faster than actual decompression.
// On success, consumes all of *compressed. On failure, consumes an
// unspecified prefix of *compressed.
bool IsValidCompressed(Source* compressed);
// The size of a compression block. Note that many parts of the compression
// code assumes that kBlockSize <= 65536; in particular, the hash table
// can only store 16-bit offsets, and EmitCopy() also assumes the offset
// is 65535 bytes or less. Note also that if you change this, it will
// affect the framing format (see framing_format.txt).
//
// Note that there might be older data around that is compressed with larger
// block sizes, so the decompression code should not rely on the
// non-existence of long backreferences.
static const int kBlockLog = 16;
static const size_t kBlockSize = 1 << kBlockLog;
static const int kMaxHashTableBits = 14;
static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
} // end namespace snappy
#endif // THIRD_PARTY_SNAPPY_SNAPPY_H__
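For comparison with the string-based C++ API above, a short round trip through the vendored Go implementation (github.com/golang/snappy, added in this commit's Godeps): Encode/Decode correspond to snappy::Compress/snappy::Uncompress, and MaxEncodedLen to MaxCompressedLength.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("0123456789"), 100) // compressible input

	enc := snappy.Encode(nil, src) // nil dst: allocate up to MaxEncodedLen
	fmt.Println(len(src), "->", len(enc))

	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err) // corrupted input fails here, like Uncompress()
	}
	fmt.Println(bytes.Equal(src, dec)) // true
}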

File diff suppressed because it is too large

1
vendor/github.com/cockroachdb/c-snappy/snappy-c.cc generated vendored Symbolic link
View File

@ -0,0 +1 @@
internal/snappy-c.cc

View File

@ -0,0 +1 @@
internal/snappy-sinksource.cc

View File

@ -0,0 +1 @@
internal/snappy-stubs-internal.cc

1
vendor/github.com/cockroachdb/c-snappy/snappy.cc generated vendored Symbolic link
View File

@ -0,0 +1 @@
internal/snappy.cc

68
vendor/github.com/cockroachdb/c-snappy/snappy.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}
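Two details of the format comment and crc helper above, made concrete in a standalone sketch (illustrative values only): splitting a chunk's first byte into its l tag and m field, and the masked CRC-32C from the framing format.

package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC is the framing-format checksum: CRC-32C rotated right by
// 15 bits, plus a fixed constant, exactly as crc() above computes it.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, castagnoli, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%08x\n", maskedCRC([]byte("sNaPpY")))

	tag := byte(0x10) // example first byte of a chunk
	l := tag & 0x03   // 2 least significant bits: the chunk tag
	m := tag >> 2     // 6 most significant bits
	fmt.Println(l, m) // 0 4: a literal of 1+4 = 5 bytes follows
}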

View File

@ -24,6 +24,8 @@ import (
"sync"
"time"
"github.com/coreos/etcd/pkg/compress"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@ -122,6 +124,29 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
opts = append(opts, grpc.WithInsecure())
}
switch c.cfg.CompressType {
case compress.CGzip:
opts = append(opts,
grpc.WithCompressor(compress.NewCGzipCompressor()),
grpc.WithDecompressor(compress.NewCGzipDecompressor()))
case compress.CGzipLv2:
opts = append(opts,
grpc.WithCompressor(compress.NewCGzipLv2Compressor()),
grpc.WithDecompressor(compress.NewCGzipLv2Decompressor()))
case compress.Gzip:
opts = append(opts,
grpc.WithCompressor(compress.NewGzipCompressor()),
grpc.WithDecompressor(compress.NewGzipDecompressor()))
case compress.Snappy:
opts = append(opts,
grpc.WithCompressor(compress.NewSnappyCompressor()),
grpc.WithDecompressor(compress.NewSnappyDecompressor()))
case compress.SnappyCpp:
opts = append(opts,
grpc.WithCompressor(compress.NewSnappyCppCompressor()),
grpc.WithDecompressor(compress.NewSnappyCppDecompressor()))
}
proto := "tcp"
if url, uerr := url.Parse(endpoint); uerr == nil && url.Scheme == "unix" {
proto = "unix"
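Putting the new knob together: the switch above consumes the CompressType field that the next hunk adds to clientv3's Config. A hedged usage sketch (Endpoints and clientv3.New are the usual clientv3 surface, assumed here rather than shown in this diff):

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/pkg/compress"
)

func main() {
	cfg := clientv3.Config{
		Endpoints:    []string{"localhost:2379"},
		CompressType: compress.ParseType("snappy"), // or compress.Snappy
	}
	cli, err := clientv3.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	// gRPC messages on this connection now go through the snappy
	// compressor/decompressor registered in Dial above.
}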

View File

@ -20,6 +20,7 @@ import (
"io/ioutil"
"time"
"github.com/coreos/etcd/pkg/compress"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
"google.golang.org/grpc"
@ -43,6 +44,8 @@ type Config struct {
// Logger is the logger used by client library.
Logger Logger
CompressType compress.Type
}
type yamlConfig struct {

300
vendor/github.com/coreos/etcd/pkg/compress/compress.go generated vendored Normal file
View File

@ -0,0 +1,300 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compress
import (
"compress/gzip"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
snappyCpp "github.com/cockroachdb/c-snappy"
"github.com/golang/snappy"
"github.com/youtube/vitess/go/cgzip"
)
type Type int
const (
NoCompress Type = iota
CGzip
CGzipLv2
Gzip
Snappy
SnappyCpp
)
var (
headers = [...]string{
"",
"cgzip",
"cgzip-lv2",
"gzip",
"snappy",
"snappy-cpp",
}
// writer/reader pools, reused across goroutines
gzipWriterPool sync.Pool
gzipReaderPool sync.Pool
snappyWriterPool sync.Pool
snappyReaderPool sync.Pool
snappyCppWriterPool sync.Pool
snappyCppReaderPool sync.Pool
)
func (ct Type) String() string {
return headers[ct]
}
func ParseType(opt string) Type {
switch strings.ToLower(opt) {
case "cgzip":
return CGzip
case "cgzip-lv2":
return CGzipLv2
case "gzip":
return Gzip
case "snappy":
return Snappy
case "snappy-cpp":
return SnappyCpp
default:
return NoCompress
}
}
func NewRequest(req *http.Request, comp Type) *http.Request {
req.Header.Set("Accept-Encoding", headers[comp])
return req
}
type ResponseWriter struct {
rw http.ResponseWriter
writer io.Writer
cgzipWriter *cgzip.Writer
gzipWriter *gzip.Writer
snappyWriter *snappy.Writer
snappyCppWriter *snappyCpp.Writer
}
// NewResponseWriter returns an http.ResponseWriter wrapper with compression.
// If the 'Accept-Encoding' header is not recognized, it falls back to the plain http.ResponseWriter.
func NewResponseWriter(rw http.ResponseWriter, req *http.Request) *ResponseWriter {
rw.Header().Set("Vary", "Accept-Encoding")
rw.Header().Set("Cache-Control", "no-cache") // disable response caching
crw := &ResponseWriter{}
crw.rw = rw
switch req.Header.Get("Accept-Encoding") {
case "cgzip": // TODO: optimize based on level, buffer size
rw.Header().Set("Content-Encoding", "cgzip")
crw.cgzipWriter, _ = cgzip.NewWriterLevelBuffer(rw, cgzip.Z_BEST_SPEED, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
crw.writer = crw.cgzipWriter
case "cgzip-lv2":
rw.Header().Set("Content-Encoding", "cgzip-lv2")
crw.cgzipWriter, _ = cgzip.NewWriterLevelBuffer(rw, 2, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
crw.writer = crw.cgzipWriter
case "gzip":
rw.Header().Set("Content-Encoding", "gzip")
if wp := gzipWriterPool.Get(); wp != nil {
gzipWriter := wp.(*gzip.Writer)
gzipWriter.Reset(rw)
crw.gzipWriter = gzipWriter
} else {
crw.gzipWriter = gzip.NewWriter(rw)
}
crw.writer = crw.gzipWriter
case "snappy":
rw.Header().Set("Content-Encoding", "snappy")
if wp := snappyWriterPool.Get(); wp != nil {
snappyWriter := wp.(*snappy.Writer)
snappyWriter.Reset(rw)
crw.snappyWriter = snappyWriter
} else {
crw.snappyWriter = snappy.NewBufferedWriter(rw)
}
crw.writer = crw.snappyWriter
case "snappy-cpp":
rw.Header().Set("Content-Encoding", "snappy-cpp")
if wp := snappyCppWriterPool.Get(); wp != nil {
snappyCppWriter := wp.(*snappyCpp.Writer)
snappyCppWriter.Reset(rw)
crw.snappyCppWriter = snappyCppWriter
} else {
crw.snappyCppWriter = snappyCpp.NewWriter(rw)
}
crw.writer = crw.snappyCppWriter
default:
// default to plain-text
crw.writer = rw
}
return crw
}
// Header satisfies http.ResponseWriter interface.
func (crw *ResponseWriter) Header() http.Header {
return crw.rw.Header()
}
// Write satisfies io.Writer and http.ResponseWriter interfaces.
func (crw *ResponseWriter) Write(b []byte) (int, error) {
return crw.writer.Write(b)
}
// WriteHeader satisfies http.ResponseWriter interface.
func (crw *ResponseWriter) WriteHeader(status int) {
crw.rw.WriteHeader(status)
}
// Close closes any active compressing writer and returns pooled writers to their pools.
func (crw *ResponseWriter) Close() {
switch {
case crw.cgzipWriter != nil:
crw.cgzipWriter.Close()
crw.cgzipWriter = nil
case crw.gzipWriter != nil:
crw.gzipWriter.Close()
gzipWriterPool.Put(crw.gzipWriter)
crw.gzipWriter = nil
case crw.snappyWriter != nil:
crw.snappyWriter.Close()
snappyWriterPool.Put(crw.snappyWriter)
crw.snappyWriter = nil
case crw.snappyCppWriter != nil:
snappyCppWriterPool.Put(crw.snappyCppWriter)
crw.snappyCppWriter = nil
default:
crw.writer = nil
}
}
// NewHandler wraps a http.Handler to support compressions.
func NewHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
respWriter := NewResponseWriter(rw, req)
defer respWriter.Close()
h.ServeHTTP(respWriter, req)
})
}
type ResponseReader struct {
resp *http.Response
reader io.Reader
cgzipReadCloser io.ReadCloser
gzipReader *gzip.Reader
snappyReader *snappy.Reader
snappyCppReader *snappyCpp.Reader
}
// NewResponseReader returns an http.Response body wrapper that decompresses
// according to the response's Content-Encoding header.
func NewResponseReader(resp *http.Response) *ResponseReader {
crd := &ResponseReader{}
crd.resp = resp
switch resp.Header.Get("Content-Encoding") {
case "cgzip", "cgzip-lv2": // TODO: optimize based on buffer size
crd.cgzipReadCloser, _ = cgzip.NewReaderBuffer(resp.Body, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
crd.reader = crd.cgzipReadCloser
case "gzip":
if rp := gzipReaderPool.Get(); rp != nil {
gzipReader := rp.(*gzip.Reader)
gzipReader.Reset(resp.Body)
crd.gzipReader = gzipReader
} else {
crd.gzipReader, _ = gzip.NewReader(resp.Body)
}
crd.reader = crd.gzipReader
case "snappy":
if rp := snappyReaderPool.Get(); rp != nil {
snappyReader := rp.(*snappy.Reader)
snappyReader.Reset(resp.Body)
crd.snappyReader = snappyReader
} else {
crd.snappyReader = snappy.NewReader(resp.Body)
}
crd.reader = crd.snappyReader
case "snappy-cpp":
if rp := snappyCppReaderPool.Get(); rp != nil {
snappyCppReader := rp.(*snappyCpp.Reader)
snappyCppReader.Reset(resp.Body)
crd.snappyCppReader = snappyCppReader
} else {
crd.snappyCppReader = snappyCpp.NewReader(resp.Body)
}
crd.reader = crd.snappyCppReader
default:
// default to plain-text
crd.reader = resp.Body
}
return crd
}
func (crd *ResponseReader) Read(p []byte) (int, error) {
return crd.reader.Read(p)
}
func (crd *ResponseReader) Close() {
switch {
case crd.cgzipReadCloser != nil:
crd.cgzipReadCloser.Close()
crd.cgzipReadCloser = nil
case crd.gzipReader != nil:
crd.gzipReader.Close()
gzipReaderPool.Put(crd.gzipReader)
crd.gzipReader = nil
case crd.snappyReader != nil:
snappyReaderPool.Put(crd.snappyReader)
crd.snappyReader = nil
case crd.snappyCppReader != nil:
snappyCppReaderPool.Put(crd.snappyCppReader)
crd.snappyCppReader = nil
default:
crd.reader = nil
}
// Drain http.Response.Body until EOF and close it, so that the
// underlying TCP/TLS connection stays alive and can be reused.
io.Copy(ioutil.Discard, crd.resp.Body)
crd.resp.Body.Close()
}
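Taken together, the HTTP helpers above compose roughly like this (a sketch using net/http/httptest; the handler body is made up for the example):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/coreos/etcd/pkg/compress"
)

func main() {
	// Server side: NewHandler negotiates compression from Accept-Encoding.
	h := compress.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello")
	}))
	srv := httptest.NewServer(h)
	defer srv.Close()

	// Client side: request snappy encoding, then unwrap the response.
	req, _ := http.NewRequest("GET", srv.URL, nil)
	resp, err := http.DefaultClient.Do(compress.NewRequest(req, compress.Snappy))
	if err != nil {
		panic(err)
	}
	rd := compress.NewResponseReader(resp)
	b, _ := ioutil.ReadAll(rd)
	rd.Close()
	fmt.Println(string(b)) // hello
}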

16
vendor/github.com/coreos/etcd/pkg/compress/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compress wraps compression algorithms.
package compress

59
vendor/github.com/coreos/etcd/pkg/compress/grpc.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compress
import "io"
type Compressor interface {
Do(w io.Writer, p []byte) error
Type() string
}
func NewCompressor(ct Type) Compressor {
switch ct {
case CGzip:
return NewCGzipCompressor()
case CGzipLv2:
return NewCGzipLv2Compressor()
case Gzip:
return NewGzipCompressor()
case Snappy:
return NewSnappyCompressor()
case SnappyCpp:
return NewSnappyCppCompressor()
}
return nil
}
type Decompressor interface {
Do(r io.Reader) ([]byte, error)
Type() string
}
func NewDecompressor(ct Type) Decompressor {
switch ct {
case CGzip:
return NewCGzipDecompressor()
case CGzipLv2:
return NewCGzipLv2Decompressor()
case Gzip:
return NewGzipDecompressor()
case Snappy:
return NewSnappyDecompressor()
case SnappyCpp:
return NewSnappyCppDecompressor()
}
return nil
}
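Outside of gRPC, the two interfaces round-trip directly (a minimal sketch against the constructors above):

package main

import (
	"bytes"
	"fmt"

	"github.com/coreos/etcd/pkg/compress"
)

func main() {
	comp := compress.NewCompressor(compress.Gzip)
	decomp := compress.NewDecompressor(compress.Gzip)

	var buf bytes.Buffer
	if err := comp.Do(&buf, []byte("payload")); err != nil {
		panic(err)
	}
	out, err := decomp.Do(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s via %s\n", out, comp.Type()) // payload via gzip
}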

View File

@ -0,0 +1,106 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compress
import (
"io"
"io/ioutil"
"github.com/youtube/vitess/go/cgzip"
)
// NewCGzipCompressor creates a Compressor based on github.com/youtube/vitess/go/cgzip.
func NewCGzipCompressor() *CGzipCompressor {
return &CGzipCompressor{}
}
type CGzipCompressor struct{}
func (c *CGzipCompressor) Do(w io.Writer, p []byte) error {
writer, err := cgzip.NewWriterLevelBuffer(w, cgzip.Z_BEST_SPEED, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
if err != nil {
return err
}
if _, err := writer.Write(p); err != nil {
return err
}
return writer.Close()
}
func (c *CGzipCompressor) Type() string {
return headers[CGzip]
}
// NewCGzipDecompressor creates a Decompressor based on github.com/youtube/vitess/go/cgzip.
func NewCGzipDecompressor() *CGzipDecompressor {
return &CGzipDecompressor{}
}
type CGzipDecompressor struct{}
func (c *CGzipDecompressor) Do(r io.Reader) ([]byte, error) {
readCloser, err := cgzip.NewReaderBuffer(r, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
if err != nil {
return nil, err
}
defer readCloser.Close()
return ioutil.ReadAll(readCloser)
}
func (c *CGzipDecompressor) Type() string {
return headers[CGzip]
}
// NewCGzipLv2Compressor creates a Compressor based on github.com/youtube/vitess/go/cgzip at compression level 2.
func NewCGzipLv2Compressor() *CGzipLv2Compressor {
return &CGzipLv2Compressor{}
}
type CGzipLv2Compressor struct{}
func (c *CGzipLv2Compressor) Do(w io.Writer, p []byte) error {
writer, err := cgzip.NewWriterLevelBuffer(w, 2, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
if err != nil {
return err
}
if _, err := writer.Write(p); err != nil {
return err
}
return writer.Close()
}
func (c *CGzipLv2Compressor) Type() string {
return headers[CGzipLv2]
}
// NewCGzipLv2Decompressor creates a Decompressor based on github.com/youtube/vitess/go/cgzip.
func NewCGzipLv2Decompressor() *CGzipLv2Decompressor {
return &CGzipLv2Decompressor{}
}
type CGzipLv2Decompressor struct{}
func (c *CGzipLv2Decompressor) Do(r io.Reader) ([]byte, error) {
readCloser, err := cgzip.NewReaderBuffer(r, cgzip.DEFAULT_COMPRESSED_BUFFER_SIZE)
if err != nil {
return nil, err
}
defer readCloser.Close()
return ioutil.ReadAll(readCloser)
}
func (c *CGzipLv2Decompressor) Type() string {
return headers[CGzipLv2]
}

View File

@ -0,0 +1,78 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compress
import (
"compress/gzip"
"io"
"io/ioutil"
)
// NewGzipCompressor creates a Compressor based on gzip.
func NewGzipCompressor() *GzipCompressor {
return &GzipCompressor{}
}
type GzipCompressor struct{}
func (c *GzipCompressor) Do(w io.Writer, p []byte) error {
var writer *gzip.Writer
if wp := gzipWriterPool.Get(); wp != nil {
writer = wp.(*gzip.Writer)
writer.Reset(w)
} else {
writer = gzip.NewWriter(w)
}
if _, err := writer.Write(p); err != nil {
return err
}
err := writer.Close()
gzipWriterPool.Put(writer)
return err
}
func (c *GzipCompressor) Type() string {
return headers[Gzip]
}
// NewGzipDecompressor creates a Decompressor based on gzip.
func NewGzipDecompressor() *GzipDecompressor {
return &GzipDecompressor{}
}
type GzipDecompressor struct{}
func (c *GzipDecompressor) Do(r io.Reader) ([]byte, error) {
var reader *gzip.Reader
if wp := gzipReaderPool.Get(); wp != nil {
reader = wp.(*gzip.Reader)
reader.Reset(r)
} else {
var err error
reader, err = gzip.NewReader(r)
if err != nil {
return nil, err
}
}
defer func() {
reader.Close()
gzipReaderPool.Put(reader)
}()
return ioutil.ReadAll(reader)
}
func (c *GzipDecompressor) Type() string {
return headers[Gzip]
}

View File

@ -0,0 +1,122 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compress
import (
"io"
"io/ioutil"
snappyCpp "github.com/cockroachdb/c-snappy"
"github.com/golang/snappy"
)
// NewSnappyCompressor creates a Compressor based on github.com/golang/snappy.
func NewSnappyCompressor() *SnappyCompressor {
return &SnappyCompressor{}
}
type SnappyCompressor struct{}
func (c *SnappyCompressor) Do(w io.Writer, p []byte) error {
var writer *snappy.Writer
if wp := snappyWriterPool.Get(); wp != nil {
writer = wp.(*snappy.Writer)
writer.Reset(w)
} else {
writer = snappy.NewBufferedWriter(w)
}
if _, err := writer.Write(p); err != nil {
return err
}
err := writer.Close()
snappyWriterPool.Put(writer)
return err
}
func (c *SnappyCompressor) Type() string {
return headers[Snappy]
}
// NewSnappyDecompressor creates a Decompressor based on github.com/golang/snappy.
func NewSnappyDecompressor() *SnappyDecompressor {
return &SnappyDecompressor{}
}
type SnappyDecompressor struct{}
func (c *SnappyDecompressor) Do(r io.Reader) ([]byte, error) {
var reader *snappy.Reader
if wp := snappyReaderPool.Get(); wp != nil {
reader = wp.(*snappy.Reader)
reader.Reset(r)
} else {
reader = snappy.NewReader(r)
}
defer snappyReaderPool.Put(reader)
return ioutil.ReadAll(reader)
}
func (c *SnappyDecompressor) Type() string {
return headers[Snappy]
}
// NewSnappyCppCompressor creates a Compressor based on github.com/cockroachdb/c-snappy.
func NewSnappyCppCompressor() *SnappyCppCompressor {
return &SnappyCppCompressor{}
}
type SnappyCppCompressor struct{}
func (c *SnappyCppCompressor) Do(w io.Writer, p []byte) error {
var writer *snappyCpp.Writer
if wp := snappyCppWriterPool.Get(); wp != nil {
writer = wp.(*snappyCpp.Writer)
writer.Reset(w)
} else {
writer = snappyCpp.NewWriter(w)
}
if _, err := writer.Write(p); err != nil {
return err
}
// Return the writer to the snappy-cpp pool (not the golang-snappy pool).
snappyCppWriterPool.Put(writer)
return nil
}
func (c *SnappyCppCompressor) Type() string {
return headers[SnappyCpp]
}
// NewSnappyCppDecompressor creates a Decompressor based on github.com/cockroachdb/c-snappy.
func NewSnappyCppDecompressor() *SnappyCppDecompressor {
return &SnappyCppDecompressor{}
}
type SnappyCppDecompressor struct{}
func (c *SnappyCppDecompressor) Do(r io.Reader) ([]byte, error) {
var reader *snappyCpp.Reader
if wp := snappyCppReaderPool.Get(); wp != nil {
reader = wp.(*snappyCpp.Reader)
reader.Reset(r)
} else {
reader = snappyCpp.NewReader(r)
}
defer snappyCppReaderPool.Put(reader)
return ioutil.ReadAll(reader)
}
func (c *SnappyCppDecompressor) Type() string {
return headers[SnappyCpp]
}

View File

@ -22,12 +22,16 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 62001,
Name: "gogoproto.goproto_enum_prefix",
Tag: "varint,62001,opt,name=goproto_enum_prefix",
Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
}
var E_GoprotoEnumStringer = &proto.ExtensionDesc{
@ -35,7 +39,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 62021,
Name: "gogoproto.goproto_enum_stringer",
Tag: "varint,62021,opt,name=goproto_enum_stringer",
Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
}
var E_EnumStringer = &proto.ExtensionDesc{
@ -43,7 +47,23 @@ var E_EnumStringer = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 62022,
Name: "gogoproto.enum_stringer",
Tag: "varint,62022,opt,name=enum_stringer",
Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
}
var E_EnumCustomname = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*string)(nil),
Field: 62023,
Name: "gogoproto.enum_customname",
Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
}
var E_EnumvalueCustomname = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumValueOptions)(nil),
ExtensionType: (*string)(nil),
Field: 66001,
Name: "gogoproto.enumvalue_customname",
Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
}
var E_GoprotoGettersAll = &proto.ExtensionDesc{
@ -51,7 +71,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63001,
Name: "gogoproto.goproto_getters_all",
Tag: "varint,63001,opt,name=goproto_getters_all",
Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
}
var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
@ -59,7 +79,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63002,
Name: "gogoproto.goproto_enum_prefix_all",
Tag: "varint,63002,opt,name=goproto_enum_prefix_all",
Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
}
var E_GoprotoStringerAll = &proto.ExtensionDesc{
@ -67,7 +87,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63003,
Name: "gogoproto.goproto_stringer_all",
Tag: "varint,63003,opt,name=goproto_stringer_all",
Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
}
var E_VerboseEqualAll = &proto.ExtensionDesc{
@ -75,7 +95,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63004,
Name: "gogoproto.verbose_equal_all",
Tag: "varint,63004,opt,name=verbose_equal_all",
Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
}
var E_FaceAll = &proto.ExtensionDesc{
@ -83,7 +103,7 @@ var E_FaceAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63005,
Name: "gogoproto.face_all",
Tag: "varint,63005,opt,name=face_all",
Tag: "varint,63005,opt,name=face_all,json=faceAll",
}
var E_GostringAll = &proto.ExtensionDesc{
@ -91,7 +111,7 @@ var E_GostringAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63006,
Name: "gogoproto.gostring_all",
Tag: "varint,63006,opt,name=gostring_all",
Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
}
var E_PopulateAll = &proto.ExtensionDesc{
@ -99,7 +119,7 @@ var E_PopulateAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63007,
Name: "gogoproto.populate_all",
Tag: "varint,63007,opt,name=populate_all",
Tag: "varint,63007,opt,name=populate_all,json=populateAll",
}
var E_StringerAll = &proto.ExtensionDesc{
@ -107,7 +127,7 @@ var E_StringerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63008,
Name: "gogoproto.stringer_all",
Tag: "varint,63008,opt,name=stringer_all",
Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
}
var E_OnlyoneAll = &proto.ExtensionDesc{
@ -115,7 +135,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63009,
Name: "gogoproto.onlyone_all",
Tag: "varint,63009,opt,name=onlyone_all",
Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
}
var E_EqualAll = &proto.ExtensionDesc{
@ -123,7 +143,7 @@ var E_EqualAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63013,
Name: "gogoproto.equal_all",
Tag: "varint,63013,opt,name=equal_all",
Tag: "varint,63013,opt,name=equal_all,json=equalAll",
}
var E_DescriptionAll = &proto.ExtensionDesc{
@ -131,7 +151,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63014,
Name: "gogoproto.description_all",
Tag: "varint,63014,opt,name=description_all",
Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
}
var E_TestgenAll = &proto.ExtensionDesc{
@ -139,7 +159,7 @@ var E_TestgenAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63015,
Name: "gogoproto.testgen_all",
Tag: "varint,63015,opt,name=testgen_all",
Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
}
var E_BenchgenAll = &proto.ExtensionDesc{
@ -147,7 +167,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63016,
Name: "gogoproto.benchgen_all",
Tag: "varint,63016,opt,name=benchgen_all",
Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
}
var E_MarshalerAll = &proto.ExtensionDesc{
@ -155,7 +175,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63017,
Name: "gogoproto.marshaler_all",
Tag: "varint,63017,opt,name=marshaler_all",
Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
}
var E_UnmarshalerAll = &proto.ExtensionDesc{
@ -163,7 +183,15 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63018,
Name: "gogoproto.unmarshaler_all",
Tag: "varint,63018,opt,name=unmarshaler_all",
Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
}
var E_StableMarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63019,
Name: "gogoproto.stable_marshaler_all",
Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
}
var E_SizerAll = &proto.ExtensionDesc{
@ -171,7 +199,7 @@ var E_SizerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63020,
Name: "gogoproto.sizer_all",
Tag: "varint,63020,opt,name=sizer_all",
Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
}
var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
@ -179,7 +207,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63021,
Name: "gogoproto.goproto_enum_stringer_all",
Tag: "varint,63021,opt,name=goproto_enum_stringer_all",
Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
}
var E_EnumStringerAll = &proto.ExtensionDesc{
@ -187,7 +215,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63022,
Name: "gogoproto.enum_stringer_all",
Tag: "varint,63022,opt,name=enum_stringer_all",
Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
}
var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
@ -195,7 +223,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63023,
Name: "gogoproto.unsafe_marshaler_all",
Tag: "varint,63023,opt,name=unsafe_marshaler_all",
Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
}
var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
@ -203,7 +231,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63024,
Name: "gogoproto.unsafe_unmarshaler_all",
Tag: "varint,63024,opt,name=unsafe_unmarshaler_all",
Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
}
var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
@ -211,7 +239,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63025,
Name: "gogoproto.goproto_extensions_map_all",
Tag: "varint,63025,opt,name=goproto_extensions_map_all",
Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
}
var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
@ -219,7 +247,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63026,
Name: "gogoproto.goproto_unrecognized_all",
Tag: "varint,63026,opt,name=goproto_unrecognized_all",
Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
}
var E_GogoprotoImport = &proto.ExtensionDesc{
@ -227,7 +255,15 @@ var E_GogoprotoImport = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 63027,
Name: "gogoproto.gogoproto_import",
Tag: "varint,63027,opt,name=gogoproto_import",
Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
}
var E_ProtosizerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63028,
Name: "gogoproto.protosizer_all",
Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
}
var E_GoprotoGetters = &proto.ExtensionDesc{
@ -235,7 +271,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64001,
Name: "gogoproto.goproto_getters",
Tag: "varint,64001,opt,name=goproto_getters",
Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
}
var E_GoprotoStringer = &proto.ExtensionDesc{
@ -243,7 +279,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64003,
Name: "gogoproto.goproto_stringer",
Tag: "varint,64003,opt,name=goproto_stringer",
Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
}
var E_VerboseEqual = &proto.ExtensionDesc{
@ -251,7 +287,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64004,
Name: "gogoproto.verbose_equal",
Tag: "varint,64004,opt,name=verbose_equal",
Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
}
var E_Face = &proto.ExtensionDesc{
@ -342,6 +378,14 @@ var E_Unmarshaler = &proto.ExtensionDesc{
Tag: "varint,64018,opt,name=unmarshaler",
}
var E_StableMarshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64019,
Name: "gogoproto.stable_marshaler",
Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
}
var E_Sizer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
@ -355,7 +399,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64023,
Name: "gogoproto.unsafe_marshaler",
Tag: "varint,64023,opt,name=unsafe_marshaler",
Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
}
var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
@ -363,7 +407,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64024,
Name: "gogoproto.unsafe_unmarshaler",
Tag: "varint,64024,opt,name=unsafe_unmarshaler",
Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
}
var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
@ -371,7 +415,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64025,
Name: "gogoproto.goproto_extensions_map",
Tag: "varint,64025,opt,name=goproto_extensions_map",
Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
}
var E_GoprotoUnrecognized = &proto.ExtensionDesc{
@ -379,7 +423,15 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{
ExtensionType: (*bool)(nil),
Field: 64026,
Name: "gogoproto.goproto_unrecognized",
Tag: "varint,64026,opt,name=goproto_unrecognized",
Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
}
var E_Protosizer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64028,
Name: "gogoproto.protosizer",
Tag: "varint,64028,opt,name=protosizer",
}
var E_Nullable = &proto.ExtensionDesc{
@ -458,6 +510,8 @@ func init() {
proto.RegisterExtension(E_GoprotoEnumPrefix)
proto.RegisterExtension(E_GoprotoEnumStringer)
proto.RegisterExtension(E_EnumStringer)
proto.RegisterExtension(E_EnumCustomname)
proto.RegisterExtension(E_EnumvalueCustomname)
proto.RegisterExtension(E_GoprotoGettersAll)
proto.RegisterExtension(E_GoprotoEnumPrefixAll)
proto.RegisterExtension(E_GoprotoStringerAll)
@ -473,6 +527,7 @@ func init() {
proto.RegisterExtension(E_BenchgenAll)
proto.RegisterExtension(E_MarshalerAll)
proto.RegisterExtension(E_UnmarshalerAll)
proto.RegisterExtension(E_StableMarshalerAll)
proto.RegisterExtension(E_SizerAll)
proto.RegisterExtension(E_GoprotoEnumStringerAll)
proto.RegisterExtension(E_EnumStringerAll)
@ -481,6 +536,7 @@ func init() {
proto.RegisterExtension(E_GoprotoExtensionsMapAll)
proto.RegisterExtension(E_GoprotoUnrecognizedAll)
proto.RegisterExtension(E_GogoprotoImport)
proto.RegisterExtension(E_ProtosizerAll)
proto.RegisterExtension(E_GoprotoGetters)
proto.RegisterExtension(E_GoprotoStringer)
proto.RegisterExtension(E_VerboseEqual)
@ -495,11 +551,13 @@ func init() {
proto.RegisterExtension(E_Benchgen)
proto.RegisterExtension(E_Marshaler)
proto.RegisterExtension(E_Unmarshaler)
proto.RegisterExtension(E_StableMarshaler)
proto.RegisterExtension(E_Sizer)
proto.RegisterExtension(E_UnsafeMarshaler)
proto.RegisterExtension(E_UnsafeUnmarshaler)
proto.RegisterExtension(E_GoprotoExtensionsMap)
proto.RegisterExtension(E_GoprotoUnrecognized)
proto.RegisterExtension(E_Protosizer)
proto.RegisterExtension(E_Nullable)
proto.RegisterExtension(E_Embed)
proto.RegisterExtension(E_Customtype)
@ -510,3 +568,75 @@ func init() {
proto.RegisterExtension(E_Castkey)
proto.RegisterExtension(E_Castvalue)
}
var fileDescriptorGogo = []byte{
// 1073 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0x4b, 0x6f, 0xdc, 0x54,
0x14, 0xc7, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55,
0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46,
0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78,
0x23, 0x41, 0x4b, 0x79, 0x2d, 0x78, 0x3f, 0x0b, 0x7c, 0x00, 0x36, 0xc0, 0x9a, 0xef, 0xc0, 0x06,
0x28, 0x2f, 0x29, 0xbb, 0x6c, 0x7a, 0x8f, 0x7d, 0x8e, 0xe7, 0xda, 0xb1, 0x74, 0xef, 0xec, 0x9c,
0xcc, 0xfd, 0xfd, 0xe6, 0xfa, 0x1c, 0xdf, 0xf3, 0x1f, 0x03, 0x04, 0x32, 0x90, 0xcb, 0x71, 0x22,
0x53, 0xd9, 0x69, 0xe3, 0x75, 0x7e, 0x79, 0xe8, 0x70, 0x20, 0x65, 0x10, 0x8a, 0xa3, 0xf9, 0x5f,
0xbd, 0x6c, 0xfb, 0xe8, 0x96, 0x50, 0xfd, 0x64, 0x10, 0xa7, 0x32, 0x29, 0x16, 0x7b, 0x0f, 0xc0,
0x02, 0x2d, 0xee, 0x8a, 0x28, 0x1b, 0x76, 0xe3, 0x44, 0x6c, 0x0f, 0x2e, 0x74, 0x6e, 0x5b, 0x2e,
0xc8, 0x65, 0x26, 0x97, 0x57, 0xf4, 0xa7, 0x0f, 0xc6, 0xe9, 0x40, 0x46, 0xea, 0xe0, 0xd5, 0xdf,
0x6f, 0x3c, 0x7c, 0xc3, 0x9d, 0xad, 0x8d, 0x79, 0x42, 0xf1, 0xb3, 0xf5, 0x1c, 0xf4, 0x36, 0xe0,
0xe6, 0x8a, 0x4f, 0xa5, 0xc9, 0x20, 0x0a, 0x44, 0x62, 0x31, 0xfe, 0x4c, 0xc6, 0x05, 0xc3, 0xf8,
0x10, 0xa1, 0xde, 0x29, 0x98, 0x19, 0xc7, 0xf5, 0x0b, 0xb9, 0xa6, 0x85, 0x29, 0x59, 0x85, 0xb9,
0x5c, 0xd2, 0xcf, 0x54, 0x2a, 0x87, 0x91, 0x3f, 0x14, 0x16, 0xcd, 0xaf, 0xb9, 0xa6, 0xbd, 0x31,
0x8b, 0xd8, 0xa9, 0x92, 0xf2, 0xce, 0xc2, 0x22, 0xfe, 0xe7, 0xbc, 0x1f, 0x66, 0xc2, 0xb4, 0x1d,
0x69, 0xb4, 0x9d, 0xc5, 0x65, 0xac, 0xfc, 0xed, 0xe2, 0x44, 0xae, 0x5c, 0x28, 0x05, 0x86, 0xd7,
0xe8, 0x44, 0x20, 0xd2, 0x54, 0x24, 0xaa, 0xeb, 0x87, 0x61, 0xc3, 0x26, 0x4f, 0x0f, 0xc2, 0xd2,
0x78, 0xe9, 0x5a, 0xb5, 0x13, 0xab, 0x05, 0x79, 0x22, 0x0c, 0xbd, 0x4d, 0xb8, 0xa5, 0xa1, 0xb3,
0x0e, 0xce, 0xcb, 0xe4, 0x5c, 0xdc, 0xd7, 0x5d, 0xd4, 0xae, 0x03, 0xff, 0xbf, 0xec, 0x87, 0x83,
0xf3, 0x5d, 0x72, 0x76, 0x88, 0xe5, 0xb6, 0xa0, 0xf1, 0x3e, 0x98, 0x3f, 0x2f, 0x92, 0x9e, 0x54,
0xa2, 0x2b, 0x9e, 0xca, 0xfc, 0xd0, 0x41, 0x77, 0x85, 0x74, 0x73, 0x04, 0xae, 0x20, 0x87, 0xae,
0x63, 0xd0, 0xda, 0xf6, 0xfb, 0xc2, 0x41, 0xf1, 0x1e, 0x29, 0x26, 0x71, 0x3d, 0xa2, 0x27, 0x60,
0x3a, 0x90, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x13, 0x3e, 0xc5, 0x0c, 0x29, 0x62, 0x19, 0x67, 0xa1,
0x9f, 0xba, 0xec, 0xe0, 0x03, 0x56, 0x30, 0x43, 0x8a, 0x31, 0xca, 0xfa, 0x21, 0x2b, 0x94, 0x51,
0xcf, 0x7b, 0x61, 0x4a, 0x46, 0xe1, 0x8e, 0x8c, 0x5c, 0x36, 0xf1, 0x11, 0x19, 0x80, 0x10, 0x14,
0x1c, 0x87, 0xb6, 0x6b, 0x23, 0x3e, 0x26, 0xbc, 0x25, 0xb8, 0x03, 0xfa, 0x9c, 0xf1, 0x90, 0xd1,
0x2b, 0x1c, 0x14, 0x9f, 0x90, 0x62, 0xd6, 0xc0, 0xe8, 0x36, 0x52, 0xa1, 0xd2, 0x40, 0xb8, 0x48,
0x3e, 0xe5, 0xdb, 0x20, 0x84, 0x4a, 0xd9, 0x13, 0x51, 0xff, 0x9c, 0x9b, 0xe1, 0x33, 0x2e, 0x25,
0x33, 0xa8, 0xd0, 0x93, 0x67, 0xe8, 0x27, 0xea, 0x9c, 0x1f, 0x3a, 0xb5, 0xe3, 0x73, 0x72, 0x4c,
0x97, 0x10, 0x55, 0x24, 0x8b, 0xc6, 0xd1, 0x7c, 0xc1, 0x15, 0x31, 0x30, 0x3a, 0x7a, 0x2a, 0xf5,
0x7b, 0xa1, 0xe8, 0x8e, 0x63, 0xfb, 0x92, 0x8f, 0x5e, 0xc1, 0xae, 0x99, 0x46, 0xdd, 0x69, 0x35,
0x78, 0xda, 0x49, 0xf3, 0x15, 0x77, 0x3a, 0x07, 0x10, 0x7e, 0x0c, 0x6e, 0x6d, 0x1c, 0xf5, 0x0e,
0xb2, 0xaf, 0x49, 0xb6, 0xd4, 0x30, 0xee, 0x69, 0x24, 0x8c, 0xab, 0xfc, 0x86, 0x47, 0x82, 0xa8,
0xb9, 0x74, 0xd5, 0xb2, 0x48, 0xf9, 0xdb, 0xe3, 0x55, 0xed, 0x5b, 0xae, 0x5a, 0xc1, 0x56, 0xaa,
0xf6, 0x30, 0x2c, 0x91, 0x71, 0xbc, 0xbe, 0x7e, 0xc7, 0x83, 0xb5, 0xa0, 0x37, 0xab, 0xdd, 0x7d,
0x1c, 0x0e, 0x95, 0xe5, 0xbc, 0x90, 0x8a, 0x48, 0x21, 0xa3, 0xf7, 0x1c, 0x3b, 0x98, 0xaf, 0x92,
0x99, 0x27, 0xfe, 0x4a, 0x29, 0x58, 0xf3, 0x63, 0x94, 0x3f, 0x0a, 0x07, 0x59, 0x9e, 0x45, 0x89,
0xe8, 0xcb, 0x20, 0xd2, 0x6d, 0xdc, 0x72, 0x50, 0x7f, 0x5f, 0x6b, 0xd5, 0xa6, 0x81, 0xa3, 0xf9,
0x0c, 0xdc, 0x54, 0xfe, 0xde, 0xe8, 0x0e, 0x86, 0xb1, 0x4c, 0x52, 0x8b, 0xf1, 0x07, 0xee, 0x54,
0xc9, 0x9d, 0xc9, 0x31, 0x6f, 0x05, 0x66, 0xf3, 0x3f, 0x5d, 0x1f, 0xc9, 0x1f, 0x49, 0x34, 0x33,
0xa2, 0x8a, 0x87, 0x67, 0xae, 0x16, 0xa4, 0x9d, 0x3b, 0xf6, 0x79, 0xd6, 0x84, 0x52, 0x7e, 0x50,
0xaa, 0x9e, 0xd9, 0xa5, 0x23, 0x57, 0xcd, 0x51, 0xef, 0x7e, 0xbc, 0xbb, 0x6a, 0xda, 0xd9, 0x65,
0x17, 0x77, 0xcb, 0x1b, 0xac, 0x84, 0x9d, 0x77, 0x1a, 0x66, 0x2a, 0x49, 0x67, 0x57, 0x3d, 0x4b,
0xaa, 0x69, 0x33, 0xe8, 0xbc, 0xbb, 0x60, 0x02, 0x53, 0xcb, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc,
0xbb, 0x07, 0x5a, 0x9c, 0x56, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x4e, 0x2a, 0x3b, 0xfe,
0x02, 0xe3, 0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x69, 0xc3, 0xb5, 0x3b, 0x0e,
0x93, 0x14, 0x51, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f,
0x26, 0xb4, 0x58, 0xaf, 0x03, 0x60, 0xca, 0x88, 0x25, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b,
0xa7, 0x58, 0xb2, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x44, 0xb2, 0xd3, 0xaf, 0x71,
0xd5, 0x19, 0xd1, 0x29, 0xda, 0x2e, 0xa7, 0x8c, 0x9d, 0x7f, 0x9d, 0xf8, 0x11, 0x83, 0x15, 0x30,
0xa6, 0x9c, 0x5d, 0xf1, 0x06, 0x57, 0xc0, 0xa0, 0xf0, 0x18, 0xd5, 0x93, 0xcb, 0x6e, 0x7a, 0x93,
0x8f, 0x51, 0x2d, 0xb8, 0xb0, 0x9b, 0xf9, 0x61, 0xb7, 0x2b, 0xde, 0xe2, 0x6e, 0xe6, 0xeb, 0x71,
0x1b, 0xf5, 0x28, 0xb0, 0x3b, 0xde, 0xe6, 0x6d, 0xd4, 0x92, 0x40, 0x07, 0x4b, 0x67, 0x7f, 0x0c,
0xd8, 0x7d, 0xef, 0x90, 0x6f, 0x7e, 0x5f, 0x0a, 0x78, 0x8f, 0xc0, 0x52, 0x73, 0x04, 0xd8, 0xad,
0x97, 0x76, 0x6b, 0x3f, 0xda, 0xcd, 0x04, 0xd0, 0x89, 0xb5, 0xd8, 0x34, 0xfe, 0xed, 0xda, 0xcb,
0xbb, 0xd5, 0xf7, 0x32, 0x73, 0xfa, 0xeb, 0x1f, 0x58, 0x30, 0x9a, 0xbc, 0x76, 0xd7, 0x15, 0x72,
0x19, 0x90, 0x3e, 0x1a, 0xad, 0x28, 0x0b, 0x43, 0xec, 0x6f, 0xe7, 0xf6, 0x86, 0x61, 0x2f, 0xc2,
0x2d, 0xc6, 0xff, 0xd8, 0xa3, 0x67, 0x9b, 0x01, 0x3d, 0x06, 0x0f, 0x88, 0x61, 0x4f, 0xdf, 0x86,
0x85, 0xfc, 0x73, 0x8f, 0xcf, 0x34, 0xae, 0xd6, 0x47, 0x02, 0x8a, 0xd7, 0xb6, 0x74, 0x27, 0xb6,
0x7e, 0xeb, 0x5f, 0x7b, 0xc5, 0x5b, 0xa0, 0x81, 0x8c, 0x04, 0xf9, 0x7b, 0x9f, 0x45, 0x70, 0xad,
0x2a, 0xc8, 0x5f, 0xf5, 0x8e, 0xc1, 0xe4, 0x13, 0x4a, 0x46, 0xa9, 0x1f, 0xd8, 0xe8, 0xbf, 0x89,
0xe6, 0xf5, 0x58, 0xb0, 0xa1, 0x4c, 0x84, 0xbe, 0x54, 0x36, 0xf6, 0x1f, 0x62, 0x4b, 0x00, 0xe1,
0xbe, 0xaf, 0x52, 0x97, 0xfb, 0xfe, 0x97, 0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xd8, 0xb1,
0xb1, 0xff, 0xf1, 0xa6, 0x69, 0xbd, 0x9e, 0x61, 0x6d, 0xbc, 0xcc, 0xdf, 0x78, 0x6d, 0xf0, 0xff,
0x04, 0x8f, 0x88, 0x93, 0x47, 0x60, 0xa1, 0x2f, 0x87, 0x75, 0xec, 0x24, 0xac, 0xca, 0x55, 0xb9,
0x9e, 0x3f, 0x4b, 0xd7, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xbc, 0x14, 0xc8, 0x00, 0x11, 0x00,
0x00,
}

View File

@ -36,6 +36,11 @@ extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
}
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
extend google.protobuf.FileOptions {
@ -55,6 +60,7 @@ extend google.protobuf.FileOptions {
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
@ -67,6 +73,7 @@ extend google.protobuf.FileOptions {
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
}
extend google.protobuf.MessageOptions {
@ -76,7 +83,7 @@ extend google.protobuf.MessageOptions {
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
@ -85,6 +92,7 @@ extend google.protobuf.MessageOptions {
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
@ -93,17 +101,18 @@ extend google.protobuf.MessageOptions {
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
}
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
}

View File

@ -37,6 +37,17 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
return proto.GetBoolExtension(field.Options, E_Nullable, true)
}
func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
nullable := IsNullable(field)
if field.IsMessage() || IsCustomType(field) {
return nullable
}
if proto3 {
return false
}
return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
}
func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
typ := GetCustomType(field)
if len(typ) > 0 {
@ -117,6 +128,22 @@ func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
return false
}
func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
name := GetEnumCustomName(field)
if len(name) > 0 {
return true
}
return false
}
func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
name := GetEnumValueCustomName(field)
if len(name) > 0 {
return true
}
return false
}
func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Customname)
@ -127,6 +154,26 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
return ""
}
func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_EnumCustomname)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Jsontag)
@ -209,10 +256,18 @@ func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_pr
return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
}
func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
}
func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
}
func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
}
func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
}

View File

@ -115,14 +115,8 @@ func setCustomType(base structPointer, f field, value interface{}) {
oldHeader.Len = v.Len()
oldHeader.Cap = v.Cap()
default:
l := 1
size := reflect.TypeOf(value).Elem().Size()
if kind == reflect.Array {
l = reflect.TypeOf(value).Elem().Len()
size = reflect.TypeOf(value).Size()
}
total := int(size) * l
structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total)
structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size))
}
}

View File

@ -105,6 +105,11 @@ func (p *Buffer) EncodeVarint(x uint64) error {
return nil
}
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
return sizeVarint(x)
}
func sizeVarint(x uint64) (n int) {
for {
n++
@ -1248,24 +1253,9 @@ func size_struct(prop *StructProperties, base structPointer) (n int) {
}
// Factor in any oneof fields.
// TODO: This could be faster and use less reflection.
if prop.oneofMarshaler != nil {
sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
for i := 0; i < prop.stype.NumField(); i++ {
fv := sv.Field(i)
if fv.Kind() != reflect.Interface || fv.IsNil() {
continue
}
if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
continue
}
spv := fv.Elem() // interface -> *T
sv := spv.Elem() // *T -> T
sf := sv.Type().Field(0) // StructField inside T
var prop Properties
prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
n += prop.size(&prop, toStructPointer(spv))
}
if prop.oneofSizer != nil {
m := structPointer_Interface(base, prop.stype).(Message)
n += prop.oneofSizer(m)
}
return
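As a quick check of the SizeVarint helper exported above (a sketch; uses the gogo/protobuf import path vendored by this commit):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// One byte per 7 bits of payload: values below 1<<7 take one byte,
	// below 1<<14 two bytes, and so on (at most ten bytes for a uint64).
	fmt.Println(proto.SizeVarint(1))       // 1
	fmt.Println(proto.SizeVarint(300))     // 2
	fmt.Println(proto.SizeVarint(1 << 21)) // 4
}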

View File

@ -50,7 +50,9 @@ Equality is defined in this way:
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN.
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
@ -88,6 +90,7 @@ func Equal(a, b Message) bool {
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
@ -113,7 +116,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2) {
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
@ -140,7 +143,8 @@ func equalStruct(v1, v2 reflect.Value) bool {
}
// v1 and v2 are known to have the same type.
func equalAny(v1, v2 reflect.Value) bool {
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
@ -163,7 +167,7 @@ func equalAny(v1, v2 reflect.Value) bool {
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2)
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
@ -174,16 +178,22 @@ func equalAny(v1, v2 reflect.Value) bool {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2) {
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem())
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
@ -194,7 +204,7 @@ func equalAny(v1, v2 reflect.Value) bool {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i)) {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
@ -229,7 +239,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
@ -257,7 +267,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}

View File

@ -403,7 +403,6 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
rep := extension.repeated()
props := extensionProperties(extension)
@ -425,7 +424,7 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
return nil, err
}
if !rep || o.index >= len(o.buf) {
if o.index >= len(o.buf) {
break
}
}

View File

@ -185,6 +185,17 @@ func NewExtension(e []byte) Extension {
return ee
}
func AppendExtension(e extendableProto, tag int32, buf []byte) {
if ee, eok := e.(extensionsMap); eok {
ext := ee.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, buf...)
ee.ExtensionMap()[int32(tag)] = ext
} else if ee, eok := e.(extensionsBytes); eok {
ext := ee.GetExtensions()
*ext = append(*ext, buf...)
}
}
func (this Extension) GoString() string {
if this.enc == nil {
if err := encodeExtension(&this); err != nil {

View File

@ -70,6 +70,12 @@ for a protocol buffer variable v:
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
@ -229,6 +235,7 @@ To create and play with a Test object:
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
@ -441,7 +448,7 @@ func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
sindex := p.index
p.buf = b
p.index = 0
depth := 0
@ -536,7 +543,7 @@ out:
fmt.Printf("\n")
p.buf = obuf
p.index = index
p.index = sindex
}
// SetDefaults sets unset protocol buffer fields to their default values.
@ -881,3 +888,7 @@ func isProto3Zero(v reflect.Value) bool {
}
return false
}
// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion1 = true

View File

@ -96,6 +96,9 @@ type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@ -147,6 +150,7 @@ type StructProperties struct {
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
@ -174,6 +178,7 @@ func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
@ -233,8 +238,9 @@ func (p *Properties) String() string {
if p.Packed {
s += ",packed"
}
if p.OrigName != p.Name {
s += ",name=" + p.OrigName
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
@ -314,6 +320,8 @@ func (p *Properties) Parse(s string) {
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
@ -784,11 +792,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.

View File

@ -573,12 +573,12 @@ func writeUnknownStruct(w *textWriter, data []byte) (err error) {
return ferr
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
if err = w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
if err = w.WriteByte(' '); err != nil {
return err
}
}

View File

@ -124,6 +124,14 @@ func isWhitespace(c byte) bool {
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
@ -338,13 +346,13 @@ func (p *textParser) next() *token {
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || p.s[0] != '"' {
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
@ -724,15 +732,15 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
ntok := p.next()
if ntok.err != nil {
return ntok.err
}
if tok.value == "]" {
if ntok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
if ntok.value != "," {
return p.errorf("Expected ']' or ',' found %q", ntok.value)
}
}
return nil

View File

@ -39,6 +39,10 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
type FieldDescriptorProto_Type int32
const (
@ -126,6 +130,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Type(value)
return nil
}
func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{3, 0}
}
type FieldDescriptorProto_Label int32
@ -163,6 +170,9 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Label(value)
return nil
}
func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{3, 1}
}
// Generated classes can be optimized for speed or code size.
type FileOptions_OptimizeMode int32
@ -201,6 +211,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
*x = FileOptions_OptimizeMode(value)
return nil
}
func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{9, 0}
}
type FieldOptions_CType int32
@ -238,6 +251,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_CType(value)
return nil
}
func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{11, 0}
}
type FieldOptions_JSType int32
@ -277,6 +293,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_JSType(value)
return nil
}
func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{11, 1}
}
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
@ -285,9 +304,10 @@ type FileDescriptorSet struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
func (*FileDescriptorSet) ProtoMessage() {}
func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
if m != nil {
@ -303,13 +323,13 @@ type FileDescriptorProto struct {
// Names of files imported by this file.
Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
// Indexes of the public imported files in the dependency list above.
PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"`
PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"`
WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
// All top-level definitions in this file.
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"`
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
@ -317,16 +337,17 @@ type FileDescriptorProto struct {
// You may safely remove this entire field without harming runtime
// functionality of the descriptors -- the information is needed only by
// development tools.
SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"`
SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
// The syntax of the proto file.
// The supported values are "proto2" and "proto3".
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FileDescriptorProto) ProtoMessage() {}
func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FileDescriptorProto) ProtoMessage() {}
func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} }
func (m *FileDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -417,21 +438,22 @@ type DescriptorProto struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"`
ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"`
OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl" json:"oneof_decl,omitempty"`
NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range" json:"reserved_range,omitempty"`
ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name" json:"reserved_name,omitempty"`
ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto) ProtoMessage() {}
func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto) ProtoMessage() {}
func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} }
func (m *DescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -512,6 +534,9 @@ type DescriptorProto_ExtensionRange struct {
func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{2, 0}
}
func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
if m != nil && m.Start != nil {
@ -539,6 +564,9 @@ type DescriptorProto_ReservedRange struct {
func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{2, 1}
}
func (m *DescriptorProto_ReservedRange) GetStart() int32 {
if m != nil && m.Start != nil {
@ -567,7 +595,7 @@ type FieldDescriptorProto struct {
// rules are used to find the type (i.e. first the nested types within this
// message are searched, then within the parent, on up to the root
// namespace).
TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"`
TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
// For extensions, this is the name of the type being extended. It is
// resolved in the same manner as type_name.
Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
@ -576,22 +604,23 @@ type FieldDescriptorProto struct {
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
// TODO(kenton): Base-64 encode?
DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"`
DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
// If set, gives the index of a oneof in the containing type's oneof_decl
// list. This field is a member of that oneof.
OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index" json:"oneof_index,omitempty"`
OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
// JSON name of this field. The value is set by protocol compiler. If the
// user has set a "json_name" option on this field, that option's value
// will be used. Otherwise, it's deduced from the field's name by converting
// it to camelCase.
JsonName *string `protobuf:"bytes,10,opt,name=json_name" json:"json_name,omitempty"`
JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FieldDescriptorProto) ProtoMessage() {}
func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FieldDescriptorProto) ProtoMessage() {}
func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} }
func (m *FieldDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -669,9 +698,10 @@ type OneofDescriptorProto struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*OneofDescriptorProto) ProtoMessage() {}
func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*OneofDescriptorProto) ProtoMessage() {}
func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} }
func (m *OneofDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -688,9 +718,10 @@ type EnumDescriptorProto struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto) ProtoMessage() {}
func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto) ProtoMessage() {}
func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} }
func (m *EnumDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -724,6 +755,9 @@ type EnumValueDescriptorProto struct {
func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumValueDescriptorProto) ProtoMessage() {}
func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{6}
}
func (m *EnumValueDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -754,9 +788,10 @@ type ServiceDescriptorProto struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*ServiceDescriptorProto) ProtoMessage() {}
func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*ServiceDescriptorProto) ProtoMessage() {}
func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} }
func (m *ServiceDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@ -784,19 +819,20 @@ type MethodDescriptorProto struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"`
OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"`
InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
// Identifies if client streams multiple client messages
ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,def=0" json:"client_streaming,omitempty"`
ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
// Identifies if server streams multiple server messages
ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,def=0" json:"server_streaming,omitempty"`
ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*MethodDescriptorProto) ProtoMessage() {}
func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*MethodDescriptorProto) ProtoMessage() {}
func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} }
const Default_MethodDescriptorProto_ClientStreaming bool = false
const Default_MethodDescriptorProto_ServerStreaming bool = false
@ -848,20 +884,20 @@ type FileOptions struct {
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
// domain names.
JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"`
JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
// If set, all the classes from the .proto file are wrapped in a single
// outer class with the given name. This applies to both Proto1
// (equivalent to the old "--one_java_file" option) and Proto2 (where
// a .proto always translates to a single class, but you may want to
// explicitly choose the class name).
JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"`
JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
// If set true, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
// file. Thus, these types will *not* be nested inside the outer class
// named by java_outer_classname. However, the outer class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"`
JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
// If set true, then the Java code generator will generate equals() and
// hashCode() methods for all messages defined in the .proto file.
// This increases generated code size, potentially substantially for large
@ -874,21 +910,21 @@ type FileOptions struct {
// the generated methods compute their results based on field values rather
// than object identity. (Implementations should not assume that hashcodes
// will be consistent across runtimes or versions of the protocol compiler.)
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"`
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"`
// If set true, then the Java2 code generator will generate code that
// throws an exception whenever an attempt is made to assign a non-UTF-8
// byte sequence to a string field.
// Message reflection will do the same.
// However, an extension field still accepts non-UTF-8 byte sequences.
// This option has no effect when used with the lite runtime.
JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,def=0" json:"java_string_check_utf8,omitempty"`
OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
// - The basename of the package import path, if provided.
// - Otherwise, the package statement in the .proto file, if present.
// - Otherwise, the basename of the .proto file, without extension.
GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"`
GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
@ -899,9 +935,9 @@ type FileOptions struct {
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"`
JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"`
PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"`
CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
@ -909,24 +945,25 @@ type FileOptions struct {
Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,def=0" json:"cc_enable_arenas,omitempty"`
CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
// Sets the objective c class prefix which is prepended to all objective c
// generated classes from this .proto. There is no default.
ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix" json:"objc_class_prefix,omitempty"`
ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
// Namespace for generated classes; defaults to the package.
CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace" json:"csharp_namespace,omitempty"`
CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
// Whether the nano proto compiler should generate in the deprecated non-nano
// suffixed package.
JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package" json:"javanano_use_deprecated_package,omitempty"`
JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FileOptions) Reset() { *m = FileOptions{} }
func (m *FileOptions) String() string { return proto.CompactTextString(m) }
func (*FileOptions) ProtoMessage() {}
func (m *FileOptions) Reset() { *m = FileOptions{} }
func (m *FileOptions) String() string { return proto.CompactTextString(m) }
func (*FileOptions) ProtoMessage() {}
func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }
var extRange_FileOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1083,11 +1120,11 @@ type MessageOptions struct {
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"`
MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; in the very least,
@ -1114,16 +1151,17 @@ type MessageOptions struct {
// NOTE: Do not set the option in .proto files. Always use the maps syntax
// instead. The option should only be implicitly set by the proto compiler
// parser.
MapEntry *bool `protobuf:"varint,7,opt,name=map_entry" json:"map_entry,omitempty"`
MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MessageOptions) Reset() { *m = MessageOptions{} }
func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
func (*MessageOptions) ProtoMessage() {}
func (m *MessageOptions) Reset() { *m = MessageOptions{} }
func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
func (*MessageOptions) ProtoMessage() {}
func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }
var extRange_MessageOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1237,14 +1275,15 @@ type FieldOptions struct {
// For Google-internal migration only. Do not use.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FieldOptions) Reset() { *m = FieldOptions{} }
func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
func (*FieldOptions) ProtoMessage() {}
func (m *FieldOptions) Reset() { *m = FieldOptions{} }
func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
func (*FieldOptions) ProtoMessage() {}
func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
var extRange_FieldOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1318,21 +1357,22 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
type EnumOptions struct {
// Set this option to true to allow mapping different tag names to the same
// value.
AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias" json:"allow_alias,omitempty"`
AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
// Is this enum deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *EnumOptions) Reset() { *m = EnumOptions{} }
func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
func (*EnumOptions) ProtoMessage() {}
func (m *EnumOptions) Reset() { *m = EnumOptions{} }
func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
func (*EnumOptions) ProtoMessage() {}
func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }
var extRange_EnumOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1378,14 +1418,15 @@ type EnumValueOptions struct {
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
func (*EnumValueOptions) ProtoMessage() {}
func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
func (*EnumValueOptions) ProtoMessage() {}
func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }
var extRange_EnumValueOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1424,14 +1465,15 @@ type ServiceOptions struct {
// this is a formalization for deprecating services.
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
func (*ServiceOptions) ProtoMessage() {}
func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
func (*ServiceOptions) ProtoMessage() {}
func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }
var extRange_ServiceOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1470,14 +1512,15 @@ type MethodOptions struct {
// this is a formalization for deprecating methods.
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MethodOptions) Reset() { *m = MethodOptions{} }
func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
func (*MethodOptions) ProtoMessage() {}
func (m *MethodOptions) Reset() { *m = MethodOptions{} }
func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
func (*MethodOptions) ProtoMessage() {}
func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }
var extRange_MethodOptions = []proto.ExtensionRange{
{1000, 536870911},
@ -1519,18 +1562,19 @@ type UninterpretedOption struct {
Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"`
PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"`
NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"`
DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"`
StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"`
AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"`
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption) ProtoMessage() {}
func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption) ProtoMessage() {}
func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} }
func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
if m != nil {
@ -1587,14 +1631,17 @@ func (m *UninterpretedOption) GetAggregateValue() string {
// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
// "foo.(bar.baz).qux".
type UninterpretedOption_NamePart struct {
NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"`
IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"`
NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{16, 0}
}
func (m *UninterpretedOption_NamePart) GetNamePart() string {
if m != nil && m.NamePart != nil {
@ -1660,9 +1707,10 @@ type SourceCodeInfo struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo) ProtoMessage() {}
func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo) ProtoMessage() {}
func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} }
func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
if m != nil {
@ -1749,15 +1797,18 @@ type SourceCodeInfo_Location struct {
// optional int32 grault = 6;
//
// // ignored detached comments.
LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments" json:"leading_comments,omitempty"`
TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments" json:"trailing_comments,omitempty"`
LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments" json:"leading_detached_comments,omitempty"`
LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
return fileDescriptorDescriptor, []int{17, 0}
}
func (m *SourceCodeInfo_Location) GetPath() []int32 {
if m != nil {
@ -1823,3 +1874,144 @@ func init() {
proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
}
var fileDescriptorDescriptor = []byte{
// 2192 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6,
0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xac, 0xd8, 0xb4, 0x62, 0xc7, 0x31, 0x63, 0xc7,
0x8e, 0xd3, 0xd2, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8,
0x3e, 0x92, 0xad, 0x93, 0x0b, 0x06, 0x02, 0x1f, 0x29, 0xd8, 0x20, 0xc0, 0x02, 0xa0, 0x6d, 0xe5,
0xd4, 0x99, 0x9e, 0xfa, 0x0d, 0x3a, 0x6d, 0xa7, 0x87, 0x5c, 0x32, 0xd3, 0x0f, 0xd0, 0x43, 0xef,
0xbd, 0xf6, 0xd0, 0x73, 0x8f, 0x9d, 0x69, 0xbf, 0x41, 0xaf, 0xdd, 0xf7, 0x1e, 0x00, 0x02, 0x24,
0x15, 0xab, 0x99, 0x49, 0x13, 0x5d, 0xc4, 0xb7, 0xfb, 0xdb, 0xc5, 0xbe, 0x7d, 0xbf, 0xb7, 0xbb,
0x00, 0x28, 0x63, 0xe6, 0x19, 0xae, 0x39, 0xf7, 0x1d, 0xb7, 0x31, 0x77, 0x1d, 0xdf, 0x21, 0xd5,
0xa9, 0xe3, 0x4c, 0x2d, 0x26, 0x57, 0x27, 0x8b, 0x49, 0xfd, 0x08, 0x76, 0x1e, 0x99, 0x16, 0x6b,
0x47, 0xc0, 0x01, 0xf3, 0xc9, 0x43, 0xc8, 0x4e, 0x50, 0x58, 0x4b, 0xbd, 0x99, 0xb9, 0x5b, 0x7a,
0x70, 0xab, 0xb1, 0x62, 0xd4, 0x48, 0x5a, 0xf4, 0xb9, 0x98, 0x0a, 0x8b, 0xfa, 0x3f, 0xb3, 0x70,
0x69, 0x83, 0x96, 0x10, 0xc8, 0xda, 0xfa, 0x8c, 0x7b, 0x4c, 0xdd, 0x2d, 0x52, 0xf1, 0x9b, 0xd4,
0x60, 0x6b, 0xae, 0x1b, 0xcf, 0xf4, 0x29, 0xab, 0xa5, 0x85, 0x38, 0x5c, 0x92, 0x37, 0x00, 0xc6,
0x6c, 0xce, 0xec, 0x31, 0xb3, 0x8d, 0xb3, 0x5a, 0x06, 0xa3, 0x28, 0xd2, 0x98, 0x84, 0xbc, 0x0b,
0x3b, 0xf3, 0xc5, 0x89, 0x65, 0x1a, 0x5a, 0x0c, 0x06, 0x08, 0xcb, 0x51, 0x45, 0x2a, 0xda, 0x4b,
0xf0, 0x1d, 0xa8, 0xbe, 0x60, 0xfa, 0xb3, 0x38, 0xb4, 0x24, 0xa0, 0x15, 0x2e, 0x8e, 0x01, 0x5b,
0x50, 0x9e, 0x31, 0xcf, 0xc3, 0x00, 0x34, 0xff, 0x6c, 0xce, 0x6a, 0x59, 0xb1, 0xfb, 0x37, 0xd7,
0x76, 0xbf, 0xba, 0xf3, 0x52, 0x60, 0x35, 0x44, 0x23, 0xd2, 0x84, 0x22, 0xb3, 0x17, 0x33, 0xe9,
0x21, 0x77, 0x4e, 0xfe, 0x54, 0x44, 0xac, 0x7a, 0x29, 0x70, 0xb3, 0xc0, 0xc5, 0x96, 0xc7, 0xdc,
0xe7, 0xa6, 0xc1, 0x6a, 0x79, 0xe1, 0xe0, 0xce, 0x9a, 0x83, 0x81, 0xd4, 0xaf, 0xfa, 0x08, 0xed,
0x70, 0x2b, 0x45, 0xf6, 0xd2, 0x67, 0xb6, 0x67, 0x3a, 0x76, 0x6d, 0x4b, 0x38, 0xb9, 0xbd, 0xe1,
0x14, 0x99, 0x35, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc0, 0x96, 0x33, 0xf7, 0xf1, 0x97, 0x57,
0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0xdb, 0x48, 0x84, 0x9e, 0xc4, 0xd0, 0x10, 0x4c, 0x3a, 0xa0,
0x78, 0xce, 0xc2, 0x35, 0x98, 0x66, 0x38, 0x63, 0xa6, 0x99, 0xf6, 0xc4, 0xa9, 0x15, 0x85, 0x83,
0x1b, 0xeb, 0x1b, 0x11, 0xc0, 0x16, 0xe2, 0x3a, 0x08, 0xa3, 0x15, 0x2f, 0xb1, 0x26, 0x97, 0x21,
0xef, 0x9d, 0xd9, 0xbe, 0xfe, 0xb2, 0x56, 0x16, 0x0c, 0x09, 0x56, 0xf5, 0xff, 0xe4, 0xa0, 0x7a,
0x11, 0x8a, 0x7d, 0x04, 0xb9, 0x09, 0xdf, 0x25, 0x12, 0xec, 0x7f, 0xc8, 0x81, 0xb4, 0x49, 0x26,
0x31, 0xff, 0x35, 0x93, 0xd8, 0x84, 0x92, 0xcd, 0x3c, 0x9f, 0x8d, 0x25, 0x23, 0x32, 0x17, 0xe4,
0x14, 0x48, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x16, 0xa5, 0x9e, 0x40, 0x35, 0x0a, 0x49, 0x73, 0x75,
0x7b, 0x1a, 0x72, 0xf3, 0xfe, 0xab, 0x22, 0x69, 0xa8, 0xa1, 0x1d, 0xe5, 0x66, 0xb4, 0xc2, 0x12,
0x6b, 0xd2, 0x06, 0x70, 0x6c, 0xe6, 0x4c, 0xf0, 0x7a, 0x19, 0x16, 0xf2, 0x64, 0x73, 0x96, 0x7a,
0x1c, 0xb2, 0x96, 0x25, 0x47, 0x4a, 0x0d, 0x8b, 0xfc, 0x78, 0x49, 0xb5, 0xad, 0x73, 0x98, 0x72,
0x24, 0x2f, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x19, 0xe7, 0x3d, 0xa6, 0x58, 0xee, 0xac, 0x28,
0x82, 0x68, 0xbc, 0x72, 0x67, 0x34, 0x30, 0x93, 0x1b, 0xdb, 0x76, 0xe3, 0x4b, 0xf2, 0x16, 0x44,
0x02, 0x4d, 0xd0, 0x0a, 0x44, 0x15, 0x2a, 0x87, 0xc2, 0x63, 0x94, 0xed, 0x3d, 0x84, 0x4a, 0x32,
0x3d, 0x64, 0x17, 0x72, 0x9e, 0xaf, 0xbb, 0xbe, 0x60, 0x61, 0x8e, 0xca, 0x05, 0x51, 0x20, 0x83,
0x45, 0x46, 0x54, 0xb9, 0x1c, 0xe5, 0x3f, 0xf7, 0x3e, 0x84, 0xed, 0xc4, 0xe3, 0x2f, 0x6a, 0x58,
0xff, 0x6d, 0x1e, 0x76, 0x37, 0x71, 0x6e, 0x23, 0xfd, 0xf1, 0xfa, 0x20, 0x03, 0x4e, 0x98, 0x8b,
0xbc, 0xe3, 0x1e, 0x82, 0x15, 0x32, 0x2a, 0x67, 0xe9, 0x27, 0xcc, 0x42, 0x36, 0xa5, 0xee, 0x56,
0x1e, 0xbc, 0x7b, 0x21, 0x56, 0x37, 0xba, 0xdc, 0x84, 0x4a, 0x4b, 0xf2, 0x31, 0x64, 0x83, 0x12,
0xc7, 0x3d, 0xdc, 0xbb, 0x98, 0x07, 0xce, 0x45, 0x2a, 0xec, 0xc8, 0xeb, 0x50, 0xe4, 0xff, 0x65,
0x6e, 0xf3, 0x22, 0xe6, 0x02, 0x17, 0xf0, 0xbc, 0x92, 0x3d, 0x28, 0x08, 0x9a, 0x8d, 0x59, 0xd8,
0x1a, 0xa2, 0x35, 0x3f, 0x98, 0x31, 0x9b, 0xe8, 0x0b, 0xcb, 0xd7, 0x9e, 0xeb, 0xd6, 0x82, 0x09,
0xc2, 0xe0, 0xc1, 0x04, 0xc2, 0x9f, 0x73, 0x19, 0xb9, 0x01, 0x25, 0xc9, 0x4a, 0x13, 0x6d, 0x5e,
0x8a, 0xea, 0x93, 0xa3, 0x92, 0xa8, 0x1d, 0x2e, 0xe1, 0x8f, 0x7f, 0xea, 0xe1, 0x5d, 0x08, 0x8e,
0x56, 0x3c, 0x82, 0x0b, 0xc4, 0xe3, 0x3f, 0x5c, 0x2d, 0x7c, 0xd7, 0x37, 0x6f, 0x6f, 0x95, 0x8b,
0xf5, 0x3f, 0xa7, 0x21, 0x2b, 0xee, 0x5b, 0x15, 0x4a, 0xc3, 0x4f, 0xfb, 0xaa, 0xd6, 0xee, 0x8d,
0x0e, 0xba, 0xaa, 0x92, 0x22, 0x15, 0x00, 0x21, 0x78, 0xd4, 0xed, 0x35, 0x87, 0x4a, 0x3a, 0x5a,
0x77, 0x8e, 0x87, 0x1f, 0xfc, 0x48, 0xc9, 0x44, 0x06, 0x23, 0x29, 0xc8, 0xc6, 0x01, 0x3f, 0x7c,
0xa0, 0xe4, 0x90, 0x09, 0x65, 0xe9, 0xa0, 0xf3, 0x44, 0x6d, 0x23, 0x22, 0x9f, 0x94, 0x20, 0x66,
0x8b, 0x6c, 0x43, 0x51, 0x48, 0x0e, 0x7a, 0xbd, 0xae, 0x52, 0x88, 0x7c, 0x0e, 0x86, 0xb4, 0x73,
0x7c, 0xa8, 0x14, 0x23, 0x9f, 0x87, 0xb4, 0x37, 0xea, 0x2b, 0x10, 0x79, 0x38, 0x52, 0x07, 0x83,
0xe6, 0xa1, 0xaa, 0x94, 0x22, 0xc4, 0xc1, 0xa7, 0x43, 0x75, 0xa0, 0x94, 0x13, 0x61, 0xe1, 0x23,
0xb6, 0xa3, 0x47, 0xa8, 0xc7, 0xa3, 0x23, 0xa5, 0x42, 0x76, 0x60, 0x5b, 0x3e, 0x22, 0x0c, 0xa2,
0xba, 0x22, 0xc2, 0x48, 0x95, 0x65, 0x20, 0xd2, 0xcb, 0x4e, 0x42, 0x80, 0x08, 0x52, 0x6f, 0x41,
0x4e, 0xb0, 0x0b, 0x59, 0x5c, 0xe9, 0x36, 0x0f, 0xd4, 0xae, 0xd6, 0xeb, 0x0f, 0x3b, 0xbd, 0xe3,
0x66, 0x17, 0x73, 0x17, 0xc9, 0xa8, 0xfa, 0xb3, 0x51, 0x87, 0xaa, 0x6d, 0xcc, 0x5f, 0x4c, 0xd6,
0x57, 0x9b, 0x43, 0x94, 0x65, 0xea, 0xf7, 0x60, 0x77, 0x53, 0x9d, 0xd9, 0x74, 0x33, 0xea, 0x5f,
0xa4, 0xe0, 0xd2, 0x86, 0x92, 0xb9, 0xf1, 0x16, 0xfd, 0x14, 0x72, 0x92, 0x69, 0xb2, 0x89, 0xbc,
0xb3, 0xb1, 0xf6, 0x0a, 0xde, 0xad, 0x35, 0x12, 0x61, 0x17, 0x6f, 0xa4, 0x99, 0x73, 0x1a, 0x29,
0x77, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x9e, 0xef, 0x57, 0xdc, 0xf7, 0x74, 0xe2, 0xbe,
0x7f, 0xb4, 0x1a, 0xc0, 0xcd, 0xf3, 0xf7, 0xb0, 0x16, 0xc5, 0x97, 0x29, 0xb8, 0xbc, 0x79, 0xde,
0xd8, 0x18, 0xc3, 0xc7, 0x90, 0x9f, 0x31, 0xff, 0xd4, 0x09, 0x7b, 0xee, 0xdb, 0x1b, 0x2a, 0x39,
0x57, 0xaf, 0xe6, 0x2a, 0xb0, 0x8a, 0xb7, 0x82, 0xcc, 0x79, 0x43, 0x83, 0x8c, 0x66, 0x2d, 0xd2,
0xdf, 0xa4, 0xe1, 0xb5, 0x8d, 0xce, 0x37, 0x06, 0x7a, 0x1d, 0xc0, 0xb4, 0xe7, 0x0b, 0x5f, 0xf6,
0x55, 0x59, 0x66, 0x8a, 0x42, 0x22, 0xae, 0x30, 0x2f, 0x21, 0x0b, 0x3f, 0xd2, 0x67, 0x84, 0x1e,
0xa4, 0x48, 0x00, 0x1e, 0x2e, 0x03, 0xcd, 0x8a, 0x40, 0xdf, 0x38, 0x67, 0xa7, 0x6b, 0x2d, 0xeb,
0x3d, 0x50, 0x0c, 0xcb, 0x64, 0xb6, 0xaf, 0x79, 0xbe, 0xcb, 0xf4, 0x99, 0x69, 0x4f, 0x45, 0x1d,
0x2d, 0xec, 0xe7, 0x26, 0xba, 0xe5, 0x31, 0x5a, 0x95, 0xea, 0x41, 0xa8, 0xe5, 0x16, 0xa2, 0x59,
0xb8, 0x31, 0x8b, 0x7c, 0xc2, 0x42, 0xaa, 0x23, 0x8b, 0xfa, 0xdf, 0xb7, 0xa0, 0x14, 0x9b, 0xce,
0xc8, 0x4d, 0x28, 0x3f, 0xd5, 0x9f, 0xeb, 0x5a, 0x38, 0x71, 0xcb, 0x4c, 0x94, 0xb8, 0xac, 0x1f,
0x4c, 0xdd, 0xef, 0xc1, 0xae, 0x80, 0xe0, 0x1e, 0xf1, 0x41, 0x86, 0xa5, 0x7b, 0x9e, 0x48, 0x5a,
0x41, 0x40, 0x09, 0xd7, 0xf5, 0xb8, 0xaa, 0x15, 0x6a, 0xc8, 0xfb, 0x70, 0x49, 0x58, 0xcc, 0xb0,
0xf0, 0x9a, 0x73, 0x8b, 0x69, 0xfc, 0x1d, 0xc0, 0x13, 0xf5, 0x34, 0x8a, 0x6c, 0x87, 0x23, 0x8e,
0x02, 0x00, 0x8f, 0xc8, 0x23, 0x87, 0x70, 0x5d, 0x98, 0x4d, 0x99, 0xcd, 0x5c, 0xdd, 0x67, 0x1a,
0xfb, 0xe5, 0x02, 0xb1, 0x9a, 0x6e, 0x8f, 0xb5, 0x53, 0xdd, 0x3b, 0xad, 0xed, 0xc6, 0x1d, 0x5c,
0xe5, 0xd8, 0xc3, 0x00, 0xaa, 0x0a, 0x64, 0xd3, 0x1e, 0x7f, 0x82, 0x38, 0xb2, 0x0f, 0x97, 0x85,
0x23, 0x4c, 0x0a, 0xee, 0x59, 0x33, 0x4e, 0x99, 0xf1, 0x4c, 0x5b, 0xf8, 0x93, 0x87, 0xb5, 0xd7,
0xe3, 0x1e, 0x44, 0x90, 0x03, 0x81, 0x69, 0x71, 0xc8, 0x08, 0x11, 0x64, 0x00, 0x65, 0x7e, 0x1e,
0x33, 0xf3, 0x73, 0x0c, 0xdb, 0x71, 0x45, 0x8f, 0xa8, 0x6c, 0xb8, 0xdc, 0xb1, 0x24, 0x36, 0x7a,
0x81, 0xc1, 0x11, 0xce, 0xa7, 0xfb, 0xb9, 0x41, 0x5f, 0x55, 0xdb, 0xb4, 0x14, 0x7a, 0x79, 0xe4,
0xb8, 0x9c, 0x53, 0x53, 0x27, 0xca, 0x71, 0x49, 0x72, 0x6a, 0xea, 0x84, 0x19, 0xc6, 0x7c, 0x19,
0x86, 0xdc, 0x36, 0xbe, 0xbb, 0x04, 0xc3, 0xba, 0x57, 0x53, 0x12, 0xf9, 0x32, 0x8c, 0x43, 0x09,
0x08, 0x68, 0xee, 0xe1, 0x95, 0x78, 0x6d, 0x99, 0xaf, 0xb8, 0xe1, 0xce, 0xda, 0x2e, 0x57, 0x4d,
0xf1, 0x89, 0xf3, 0xb3, 0x75, 0x43, 0x92, 0x78, 0xe2, 0xfc, 0x6c, 0xd5, 0xec, 0xb6, 0x78, 0x01,
0x73, 0x99, 0x81, 0x29, 0x1f, 0xd7, 0xae, 0xc4, 0xd1, 0x31, 0x05, 0xb9, 0x8f, 0x44, 0x36, 0x34,
0x66, 0xeb, 0x27, 0x78, 0xf6, 0xba, 0x8b, 0x3f, 0xbc, 0xda, 0x8d, 0x38, 0xb8, 0x62, 0x18, 0xaa,
0xd0, 0x36, 0x85, 0x92, 0xdc, 0x83, 0x1d, 0xe7, 0xe4, 0xa9, 0x21, 0xc9, 0xa5, 0xa1, 0x9f, 0x89,
0xf9, 0xb2, 0x76, 0x4b, 0xa4, 0xa9, 0xca, 0x15, 0x82, 0x5a, 0x7d, 0x21, 0x26, 0xef, 0xa0, 0x73,
0xef, 0x54, 0x77, 0xe7, 0xa2, 0x49, 0x7b, 0x98, 0x54, 0x56, 0xbb, 0x2d, 0xa1, 0x52, 0x7e, 0x1c,
0x8a, 0x89, 0x0a, 0x37, 0xf8, 0xe6, 0x6d, 0xdd, 0x76, 0xb4, 0x85, 0xc7, 0xb4, 0x65, 0x88, 0xd1,
0x59, 0xbc, 0xcd, 0xc3, 0xa2, 0xd7, 0x42, 0xd8, 0xc8, 0xc3, 0x62, 0x16, 0x82, 0xc2, 0xe3, 0x79,
0x02, 0xbb, 0x0b, 0xdb, 0xb4, 0x91, 0xe2, 0xa8, 0xe1, 0xc6, 0xf2, 0xc2, 0xd6, 0xfe, 0xb5, 0x75,
0xce, 0xd0, 0x3d, 0x8a, 0xa3, 0x25, 0x49, 0xe8, 0xa5, 0xc5, 0xba, 0xb0, 0xbe, 0x0f, 0xe5, 0x38,
0x77, 0x48, 0x11, 0x24, 0x7b, 0xb0, 0xbb, 0x61, 0x47, 0x6d, 0xf5, 0xda, 0xbc, 0x17, 0x7e, 0xa6,
0x62, 0x63, 0xc3, 0x9e, 0xdc, 0xed, 0x0c, 0x55, 0x8d, 0x8e, 0x8e, 0x87, 0x9d, 0x23, 0x55, 0xc9,
0xdc, 0x2b, 0x16, 0xfe, 0xbd, 0xa5, 0xfc, 0x0a, 0xff, 0xd2, 0xf5, 0xbf, 0xa6, 0xa1, 0x92, 0x9c,
0x83, 0xc9, 0x4f, 0xe0, 0x4a, 0xf8, 0xd2, 0xea, 0x31, 0x5f, 0x7b, 0x61, 0xba, 0x82, 0xce, 0x33,
0x5d, 0x4e, 0x92, 0xd1, 0x49, 0xec, 0x06, 0x28, 0x7c, 0xbd, 0xff, 0x05, 0x62, 0x1e, 0x09, 0x08,
0xe9, 0xc2, 0x0d, 0x4c, 0x19, 0xce, 0x9a, 0xf6, 0x58, 0x77, 0xc7, 0xda, 0xf2, 0x73, 0x81, 0xa6,
0x1b, 0xc8, 0x03, 0xcf, 0x91, 0x9d, 0x24, 0xf2, 0x72, 0xcd, 0x76, 0x06, 0x01, 0x78, 0x59, 0x62,
0x9b, 0x01, 0x74, 0x85, 0x35, 0x99, 0xf3, 0x58, 0x83, 0xb3, 0xd7, 0x4c, 0x9f, 0x23, 0x6d, 0x7c,
0xf7, 0x4c, 0x4c, 0x6f, 0x05, 0x5a, 0x40, 0x81, 0xca, 0xd7, 0xdf, 0xdc, 0x19, 0xc4, 0xf3, 0xf8,
0x8f, 0x0c, 0x94, 0xe3, 0x13, 0x1c, 0x1f, 0x88, 0x0d, 0x51, 0xe6, 0x53, 0xa2, 0x0a, 0xbc, 0xf5,
0x95, 0xf3, 0x5e, 0xa3, 0xc5, 0xeb, 0xff, 0x7e, 0x5e, 0xce, 0x55, 0x54, 0x5a, 0xf2, 0xde, 0xcb,
0xb9, 0xc6, 0xe4, 0xb4, 0x5e, 0xa0, 0xc1, 0x0a, 0x8b, 0x5d, 0xfe, 0xa9, 0x27, 0x7c, 0xe7, 0x85,
0xef, 0x5b, 0x5f, 0xed, 0xfb, 0xf1, 0x40, 0x38, 0x2f, 0x3e, 0x1e, 0x68, 0xc7, 0x3d, 0x7a, 0xd4,
0xec, 0xd2, 0xc0, 0x9c, 0x5c, 0x85, 0xac, 0xa5, 0x7f, 0x7e, 0x96, 0xec, 0x14, 0x42, 0x74, 0xd1,
0xc4, 0xa3, 0x07, 0xfe, 0xc9, 0x23, 0x59, 0x9f, 0x85, 0xe8, 0x1b, 0xa4, 0xfe, 0x7d, 0xc8, 0x89,
0x7c, 0x11, 0x80, 0x20, 0x63, 0xca, 0xf7, 0x48, 0x01, 0xb2, 0xad, 0x1e, 0xe5, 0xf4, 0x47, 0xbe,
0x4b, 0xa9, 0xd6, 0xef, 0xa8, 0x2d, 0xbc, 0x01, 0xf5, 0xf7, 0x21, 0x2f, 0x93, 0xc0, 0xaf, 0x46,
0x94, 0x06, 0x34, 0x92, 0xcb, 0xc0, 0x47, 0x2a, 0xd4, 0x8e, 0x8e, 0x0e, 0x54, 0xaa, 0xa4, 0xe3,
0xc7, 0xfb, 0x97, 0x14, 0x94, 0x62, 0x03, 0x15, 0x6f, 0xe5, 0xba, 0x65, 0x39, 0x2f, 0x34, 0xdd,
0x32, 0xb1, 0x42, 0xc9, 0xf3, 0x01, 0x21, 0x6a, 0x72, 0xc9, 0x45, 0xf3, 0xf7, 0x7f, 0xe1, 0xe6,
0x1f, 0x53, 0xa0, 0xac, 0x0e, 0x63, 0x2b, 0x01, 0xa6, 0xbe, 0xd5, 0x00, 0xff, 0x90, 0x82, 0x4a,
0x72, 0x02, 0x5b, 0x09, 0xef, 0xe6, 0xb7, 0x1a, 0xde, 0xef, 0x53, 0xb0, 0x9d, 0x98, 0xbb, 0xbe,
0x53, 0xd1, 0xfd, 0x2e, 0x03, 0x97, 0x36, 0xd8, 0x61, 0x01, 0x92, 0x03, 0xaa, 0x9c, 0x99, 0x7f,
0x70, 0x91, 0x67, 0x35, 0x78, 0xff, 0xeb, 0xeb, 0xae, 0x1f, 0xcc, 0xb3, 0xd8, 0x2f, 0xcd, 0x31,
0x16, 0x55, 0x73, 0x62, 0xe2, 0xf8, 0x26, 0xdf, 0x58, 0xe4, 0xd4, 0x5a, 0x5d, 0xca, 0xe5, 0xeb,
0xf1, 0xf7, 0x81, 0xcc, 0x1d, 0xcf, 0xf4, 0xcd, 0xe7, 0xfc, 0xf3, 0x5c, 0xf8, 0x22, 0xcd, 0xa7,
0xd8, 0x2c, 0x55, 0x42, 0x4d, 0xc7, 0xf6, 0x23, 0xb4, 0xcd, 0xa6, 0xfa, 0x0a, 0x9a, 0x97, 0xa1,
0x0c, 0x55, 0x42, 0x4d, 0x84, 0xc6, 0x41, 0x73, 0xec, 0x2c, 0xf8, 0x40, 0x20, 0x71, 0xbc, 0xea,
0xa5, 0x68, 0x49, 0xca, 0x22, 0x48, 0x30, 0xb1, 0x2d, 0xdf, 0xe0, 0xcb, 0xb4, 0x24, 0x65, 0x12,
0x72, 0x07, 0xaa, 0xfa, 0x74, 0xea, 0x72, 0xe7, 0xa1, 0x23, 0x39, 0x86, 0x56, 0x22, 0xb1, 0x00,
0xee, 0x3d, 0x86, 0x42, 0x98, 0x07, 0xde, 0x58, 0x78, 0x26, 0xb0, 0xe7, 0x8b, 0xef, 0x28, 0x69,
0xfe, 0x52, 0x6f, 0x87, 0x4a, 0x7c, 0xa8, 0xe9, 0x69, 0xcb, 0x0f, 0x7a, 0x69, 0xd4, 0x17, 0x68,
0xc9, 0xf4, 0xa2, 0x2f, 0x38, 0xf5, 0x2f, 0xb1, 0xbd, 0x26, 0x3f, 0x48, 0x92, 0x36, 0x14, 0x2c,
0x07, 0xf9, 0xc1, 0x2d, 0xe4, 0xd7, 0xf0, 0xbb, 0xaf, 0xf8, 0x86, 0xd9, 0xe8, 0x06, 0x78, 0x1a,
0x59, 0xee, 0xfd, 0x2d, 0x05, 0x85, 0x50, 0x8c, 0x8d, 0x22, 0x3b, 0xd7, 0xfd, 0x53, 0xe1, 0x2e,
0x77, 0x90, 0x56, 0x52, 0x54, 0xac, 0xb9, 0x1c, 0xa7, 0x19, 0x5b, 0x50, 0x20, 0x90, 0xf3, 0x35,
0x3f, 0x57, 0x8b, 0xe9, 0x63, 0x31, 0xe0, 0x3a, 0xb3, 0x19, 0x9e, 0xa4, 0x17, 0x9e, 0x6b, 0x20,
0x6f, 0x05, 0x62, 0xfe, 0x5d, 0xdc, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0xb3, 0x02, 0xab, 0x84, 0x8a,
0x08, 0xbc, 0x0f, 0x57, 0x43, 0xbf, 0x63, 0xe6, 0xeb, 0x38, 0x3c, 0x8f, 0x97, 0x46, 0x79, 0xf1,
0xb5, 0xeb, 0x4a, 0x00, 0x68, 0x07, 0xfa, 0xd0, 0xf6, 0xe0, 0x09, 0x0e, 0xb2, 0xce, 0x6c, 0x35,
0x13, 0x07, 0xca, 0xca, 0x7b, 0x97, 0xf7, 0x49, 0xea, 0x33, 0x58, 0x0e, 0x15, 0x5f, 0xa4, 0x33,
0x87, 0xfd, 0x83, 0x3f, 0xa5, 0xf7, 0x0e, 0xa5, 0x5d, 0x3f, 0xcc, 0x20, 0x65, 0x13, 0x8b, 0x19,
0x3c, 0x3b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00,
}
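
The Descriptor and EnumDescriptor methods added in this revision return the gzipped blob above together with an index path into it. A minimal sketch of unpacking one, assuming the vendored gogo packages (hypothetical program, not part of the commit):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Descriptor() returns the gzipped FileDescriptorProto for
	// descriptor.proto plus the path to this message within it.
	gz, path := (&descriptor.FileDescriptorProto{}).Descriptor()
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fd := new(descriptor.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		panic(err)
	}
	// path is []int{1}, i.e. message_type[1] of descriptor.proto,
	// which is FileDescriptorProto itself.
	fmt.Println(fd.GetName(), fd.MessageType[path[0]].GetName())
}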


@ -127,13 +127,22 @@ func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
if msg.GetName() == typeName {
return msg
}
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return nes
}
if msg.GetName()+"."+nes.GetName() == typeName {
return nes
}
nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
if nes != nil {
return nes
}
}
return nil
}
func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return nes
}
res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
if res != nil {
return res
}
}
return nil
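
The refactor above replaces the old one-level nested scan with a recursive GetNestedMessage, so dotted names of any depth now resolve. A minimal sketch of the behavior, assuming gogo's protoc-gen-gogo/descriptor package (where this helper lives) and hypothetical Outer/Inner messages not taken from the commit:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	outer, inner := "Outer", "Inner"
	file := &descriptor.FileDescriptorProto{
		MessageType: []*descriptor.DescriptorProto{{
			Name:       &outer,
			NestedType: []*descriptor.DescriptorProto{{Name: &inner}},
		}},
	}
	// "Outer.Inner": the outer loop matches "Outer", TrimPrefix leaves
	// "Inner", and GetNestedMessage finds it one level down.
	fmt.Println(file.GetMessage("Outer.Inner").GetName()) // Inner
}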

16
vendor/github.com/golang/snappy/.gitignore generated vendored Normal file

@ -0,0 +1,16 @@
cmd/snappytool/snappytool
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K

15
vendor/github.com/golang/snappy/AUTHORS generated vendored Normal file

@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

37
vendor/github.com/golang/snappy/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,37 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

27
vendor/github.com/golang/snappy/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

7
vendor/github.com/golang/snappy/README generated vendored Normal file

@ -0,0 +1,7 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
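
A minimal block-format round trip, assuming the Encode side vendored alongside this package (hypothetical program, not part of the commit):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello hello hello snappy")
	enc := snappy.Encode(nil, src) // nil dst: Encode allocates as needed
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec)) // true
}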

241
vendor/github.com/golang/snappy/decode.go generated vendored Normal file

@ -0,0 +1,241 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedCopy4Tag = errors.New("snappy: unsupported COPY_4 tag")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
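// Illustrative example (not in the original source): a block whose
// uncompressed length is 5 begins with the single uvarint byte 0x05,
// so decodedLen([]byte{0x05, ...}) returns (5, 1, nil).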
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
decodeErrCodeUnsupportedCopy4Tag = 3
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
case decodeErrCodeUnsupportedCopy4Tag:
return nil, errUnsupportedCopy4Tag
}
return nil, ErrCorrupt
}
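// Illustrative note (not in the original source): because Decode only
// allocates when dLen exceeds len(dst), a caller can amortize allocations
// by reusing one scratch buffer across blocks:
//
//	buf, err = Decode(buf, block)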
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
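// Illustrative example (not in the original source): a header of
// {0x00, 0x0a, 0x00, 0x00} announces a compressed-data chunk (type 0x00)
// whose body is 0x00000a = 10 bytes long.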
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
}
}
}
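// A sketch of the 4-byte chunk header parsed at the top of Read above
// (parseChunkHeader is illustrative, not part of the package): one
// chunk-type byte, then a 3-byte little-endian length of the chunk body.
func parseChunkHeader(h [4]byte) (chunkType byte, chunkLen int) {
	return h[0], int(h[1]) | int(h[2])<<8 | int(h[3])<<16
}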

vendor/github.com/golang/snappy/decode_amd64.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

vendor/github.com/golang/snappy/decode_amd64.s generated vendored Normal file

@@ -0,0 +1,476 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(src[s-2]) | int(src[s-1])<<8
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA errUC4T
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
errUC4T:
// return decodeErrCodeUnsupportedCopy4Tag
MOVQ $3, ret+48(FP)
RET

vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file

@@ -0,0 +1,96 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return decodeErrCodeUnsupportedCopy4Tag
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}
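// A sketch of the pattern-expansion trick used by slowForwardCopy in
// decode_amd64.s above (forwardCopySketch is illustrative, and assumes dst
// has the 10 bytes of slack past d+length discussed there): while offset < 8,
// each 8-byte copy grows the repeating pattern, so offset can be doubled
// until plain 8-byte chunked copies no longer overlap.
func forwardCopySketch(dst []byte, d, offset, length int) {
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:d-offset+8]) // extends the pattern
		length -= offset
		d += offset
		offset += offset // d-offset, the pattern start, is unchanged
	}
	for length > 0 { // may overrun, within the slack, like the asm version
		copy(dst[d:d+8], dst[d-offset:d-offset+8])
		d += 8
		length -= 8
	}
}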

vendor/github.com/golang/snappy/encode.go generated vendored Normal file

@@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
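// A worked instance of the bound above: for maxBlockSize == 65536 (see
// snappy.go), MaxEncodedLen(65536) = 32 + 65536 + 65536/6 = 76490, which is
// the package's hard-coded maxEncodedLenOfMaxBlockSize constant.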
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
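// A worked instance of the fallback above: for a full 65536-byte chunk,
// len(uncompressed)-len(uncompressed)/8 is 57344, so unless compression
// saves at least 8192 bytes (12.5%), the chunk is written uncompressed.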
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}

vendor/github.com/golang/snappy/encode_amd64.go generated vendored Normal file

@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)

vendor/github.com/golang/snappy/encode_amd64.s generated vendored Normal file

@@ -0,0 +1,608 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// TODO: figure out why the XXX lines compile with Go 1.4 and Go tip but not
// Go 1.6.
//
// This is https://github.com/golang/snappy/issues/29
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX return value
// - BX n
// - CX len(lit)
// - SI &lit[0]
// - DI &dst[i]
//
// The 24 bytes of stack space is to call runtime·memmove.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), SI
MOVQ lit_len+32(FP), CX
MOVQ CX, AX
MOVL CX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, AX
JMP emitLiteralEnd
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, AX
JMP emitLiteralEnd
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, AX
emitLiteralEnd:
MOVQ AX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, SI and CX as arguments.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - BX offset
// - CX length
// - SI &dst[0]
// - DI &dst[i]
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), BX
MOVQ length+32(FP), CX
loop0:
// for length >= 68 { etc }
CMPL CX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
SUBL $64, CX
JMP loop0
step1:
// if length > 64 { etc }
CMPL CX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
SUBL $60, CX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL CX, $12
JGE step3
CMPL BX, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB BX, 1(DI)
SHRL $8, BX
SHLB $5, BX
SUBB $4, CX
SHLB $2, CX
ORB CX, BX
ORB $1, BX
MOVB BX, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, CX
SHLB $2, CX
ORB $2, CX
MOVB CX, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - CX &src[0]
// - DX &src[len(src)]
// - SI &src[i]
// - DI &src[j]
// - R9 &src[len(src) - 8]
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), CX
MOVQ src_len+8(FP), DX
MOVQ i+24(FP), SI
MOVQ j+32(FP), DI
ADDQ CX, DX
ADDQ CX, SI
ADDQ CX, DI
MOVQ DX, R9
SUBQ $8, R9
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ DI, R9
JA cmp1
MOVQ (SI), AX
MOVQ (DI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, SI
ADDQ $8, DI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, DI
// Convert from &src[ret] to ret.
SUBQ CX, DI
MOVQ DI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ DI, DX
JAE extendMatchEnd
MOVB (SI), AX
MOVB (DI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, SI
ADDQ $1, DI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ CX, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS]
// - R14 . len(src), bytesBetweenHashLookups, x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// d += emitLiteral(dst[d:], src[nextEmit:s])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL ·emitLiteral(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
JMP inner1
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// s = extendMatch(src, candidate+4, s+4)
//
// Push args.
MOVQ DX, 0(SP)
MOVQ src_len+32(FP), R14
MOVQ R14, 8(SP)
MOVQ R14, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
ADDQ $4, R15
MOVQ R15, 24(SP)
ADDQ $4, SI
SUBQ DX, SI
MOVQ SI, 32(SP)
// Spill local variables (registers) onto the stack; call; unspill.
//
// We don't need to unspill CX or R9 as we are just about to call another
// function.
MOVQ DI, 80(SP)
MOVQ R11, 96(SP)
MOVQ R12, 104(SP)
CALL ·extendMatch(SB)
MOVQ 64(SP), DX
MOVQ 80(SP), DI
MOVQ 96(SP), R11
MOVQ 104(SP), R12
// Finish the "s =" part of "s = extendMatch(etc)", remembering that the SI
// register holds &src[s], not s.
MOVQ 40(SP), SI
ADDQ DX, SI
// d += emitCopy(dst[d:], base-candidate, s-base)
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R11, 24(SP)
MOVQ SI, AX
SUBQ R12, AX
MOVQ AX, 32(SP)
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
CALL ·emitCopy(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
// Finish the "d +=" part of "d += emitCopy(etc)".
ADDQ 40(SP), DI
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET

vendor/github.com/golang/snappy/encode_other.go generated vendored Normal file

@@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
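// A worked example of the encoding above: for len(lit) == 5, n == 4 < 60,
// so dst[0] = 4<<2|tagLiteral = 0x10 and the 5 literal bytes follow,
// 6 bytes in all.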
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
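// A worked example of the encoding above: emitCopy(dst, 100, 10) takes the
// 2-byte tagCopy1 form, since length < 12 and offset < 2048:
// dst[0] = 0<<5 | (10-4)<<2 | tagCopy1 = 0x19 and dst[1] = 100, returning 2.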
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
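// A worked example of extendMatch above: extendMatch([]byte("aaaaaa"), 0, 1)
// returns 6, since every byte matches its predecessor and j advances to
// len(src).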
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

vendor/github.com/golang/snappy/snappy.go generated vendored Normal file

@@ -0,0 +1,84 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer supported.
*/
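// A minimal sketch of the tag-byte split described above (parseTag is
// illustrative, not part of the package): for b == 0x10, l == tagLiteral
// and m == 4, meaning 1 + 4 literal bytes follow.
func parseTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}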
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}

vendor/github.com/youtube/vitess/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/youtube/vitess/go/cgzip/adler32.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgzip
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
type adler32Hash struct {
adler C.uLong
}
// an empty buffer has an adler32 of '1' by default, so start with that
// (the go hash/adler32 does the same)
func NewAdler32() hash.Hash32 {
a := &adler32Hash{}
a.Reset()
return a
}
// io.Writer interface
func (a *adler32Hash) Write(p []byte) (n int, err error) {
if len(p) > 0 {
a.adler = C.adler32(a.adler, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
}
return len(p), nil
}
// hash.Hash interface
func (a *adler32Hash) Sum(b []byte) []byte {
s := a.Sum32()
b = append(b, byte(s>>24))
b = append(b, byte(s>>16))
b = append(b, byte(s>>8))
b = append(b, byte(s))
return b
}
func (a *adler32Hash) Reset() {
a.adler = C.adler32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}
func (a *adler32Hash) Size() int {
return 4
}
func (a *adler32Hash) BlockSize() int {
return 1
}
// hash.Hash32 interface
func (a *adler32Hash) Sum32() uint32 {
return uint32(a.adler)
}
// helper method for partial checksums. From the zlib.h header:
//
// Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
// and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
// each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
// seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.
func Adler32Combine(adler1, adler2 uint32, len2 int) uint32 {
return uint32(C.adler32_combine(C.uLong(adler1), C.uLong(adler2), C.z_off_t(len2)))
}
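// An illustrative sketch (combineDemo is not part of the package): the two
// return values should be equal, since Adler32Combine stitches the halves'
// checksums together without rehashing the underlying bytes.
func combineDemo(seq1, seq2 []byte) (combined, whole uint32) {
	h := NewAdler32()
	h.Write(seq1)
	adler1 := h.Sum32()
	h.Reset()
	h.Write(seq2)
	adler2 := h.Sum32()
	h.Reset()
	h.Write(seq1)
	h.Write(seq2)
	return Adler32Combine(adler1, adler2, len(seq2)), h.Sum32()
}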

vendor/github.com/youtube/vitess/go/cgzip/crc32.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgzip
/*
#cgo CFLAGS: -Werror=implicit
#cgo pkg-config: zlib
#include "zlib.h"
*/
import "C"
import (
"hash"
"unsafe"
)
type crc32Hash struct {
crc C.uLong
}
// an empty buffer has a crc32 of '0' by default, so start with that
// (the go hash/crc32 does the same)
func NewCrc32() hash.Hash32 {
c := &crc32Hash{}
c.Reset()
return c
}
// io.Writer interface
func (a *crc32Hash) Write(p []byte) (n int, err error) {
if len(p) > 0 {
a.crc = C.crc32(a.crc, (*C.Bytef)(unsafe.Pointer(&p[0])), (C.uInt)(len(p)))
}
return len(p), nil
}
// hash.Hash interface
func (a *crc32Hash) Sum(b []byte) []byte {
s := a.Sum32()
b = append(b, byte(s>>24))
b = append(b, byte(s>>16))
b = append(b, byte(s>>8))
b = append(b, byte(s))
return b
}
func (a *crc32Hash) Reset() {
a.crc = C.crc32(0, (*C.Bytef)(unsafe.Pointer(nil)), 0)
}
func (a *crc32Hash) Size() int {
return 4
}
func (a *crc32Hash) BlockSize() int {
return 1
}
// hash.Hash32 interface
func (a *crc32Hash) Sum32() uint32 {
return uint32(a.crc)
}
// helper method for partial checksums. From the zlib.h header:
//
// Combine two CRC-32 checksums into one. For two sequences of bytes, seq1
// and seq2 with lengths len1 and len2, CRC-32 checksums were calculated for
// each, crc1 and crc2. crc32_combine() returns the CRC-32 checksum of
// seq1 and seq2 concatenated, requiring only crc1, crc2, and len2.
func Crc32Combine(crc1, crc2 uint32, len2 int) uint32 {
return uint32(C.crc32_combine(C.uLong(crc1), C.uLong(crc2), C.z_off_t(len2)))
}

vendor/github.com/youtube/vitess/go/cgzip/doc.go generated vendored Normal file

@@ -0,0 +1,6 @@
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cgzip wraps the C library for gzip.
package cgzip

vendor/github.com/youtube/vitess/go/cgzip/reader.go generated vendored Normal file

@@ -0,0 +1,100 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgzip
import "io"
// err starts out as nil
// we will call inflateEnd when we set err to a value:
// - whatever error is returned by the underlying reader
// - io.EOF if Close was called
type reader struct {
r io.Reader
in []byte
strm zstream
err error
skipIn bool
}
func NewReader(r io.Reader) (io.ReadCloser, error) {
return NewReaderBuffer(r, DEFAULT_COMPRESSED_BUFFER_SIZE)
}
func NewReaderBuffer(r io.Reader, bufferSize int) (io.ReadCloser, error) {
z := &reader{r: r, in: make([]byte, bufferSize)}
if err := z.strm.inflateInit(); err != nil {
return nil, err
}
return z, nil
}
func (z *reader) Read(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
if len(p) == 0 {
return 0, nil
}
// read and inflate until the output buffer is full
z.strm.setOutBuf(p, len(p))
for {
// if we have no data to inflate, read more
if !z.skipIn && z.strm.availIn() == 0 {
var n int
n, z.err = z.r.Read(z.in)
// If we got data and EOF, pretend we didn't get the
// EOF. That way we will return the right values
// upstream. Note this will trigger another read
// later on, that should return (0, EOF).
if n > 0 && z.err == io.EOF {
z.err = nil
}
// FIXME(alainjobart) this code is not compliant with
// the Reader interface. We should process all the
// data we got from the reader, and then return the
// error, whatever it is.
if (z.err != nil && z.err != io.EOF) || (n == 0 && z.err == io.EOF) {
z.strm.inflateEnd()
return 0, z.err
}
z.strm.setInBuf(z.in, n)
} else {
z.skipIn = false
}
// inflate some
ret, err := z.strm.inflate(zNoFlush)
if err != nil {
z.err = err
z.strm.inflateEnd()
return 0, z.err
}
// if we produced output, return it. If inflate returned Z_OK
// with a completely full output buffer, zlib may still have
// pending output, so skip the next input read (skipIn).
have := len(p) - z.strm.availOut()
if have > 0 {
z.skipIn = ret == Z_OK && z.strm.availOut() == 0
return have, z.err
}
}
}
// Close closes the Reader. It does not close the underlying io.Reader.
func (z *reader) Close() error {
if z.err != nil {
if z.err != io.EOF {
return z.err
}
return nil
}
z.strm.inflateEnd()
z.err = io.EOF
return nil
}
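
To decompress, wrap the compressed source with NewReader and read from it like any io.ReadCloser. A minimal round-trip sketch (it reuses the package's own Writer to produce a gzip stream; error returns on Write/Close are skipped for brevity):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/youtube/vitess/go/cgzip"
)

func main() {
	// compress a payload first so we have a gzip stream to read
	var compressed bytes.Buffer
	w := cgzip.NewWriter(&compressed)
	w.Write([]byte("some payload"))
	w.Close()

	r, err := cgzip.NewReader(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close() // ends the zlib stream, not the underlying reader

	plain, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plain)
}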

144
vendor/github.com/youtube/vitess/go/cgzip/writer.go generated vendored Normal file

@ -0,0 +1,144 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgzip
import (
"fmt"
"io"
)
const (
// Allowed flush values
Z_NO_FLUSH = 0
Z_PARTIAL_FLUSH = 1
Z_SYNC_FLUSH = 2
Z_FULL_FLUSH = 3
Z_FINISH = 4
Z_BLOCK = 5
Z_TREES = 6
// Return codes
Z_OK = 0
Z_STREAM_END = 1
Z_NEED_DICT = 2
Z_ERRNO = -1
Z_STREAM_ERROR = -2
Z_DATA_ERROR = -3
Z_MEM_ERROR = -4
Z_BUF_ERROR = -5
Z_VERSION_ERROR = -6
// compression levels
Z_NO_COMPRESSION = 0
Z_BEST_SPEED = 1
Z_BEST_COMPRESSION = 9
Z_DEFAULT_COMPRESSION = -1
// our default buffer size: most Go io functions use 32KB
// buffers, so 32KB works well for the compressed-data buffer too
DEFAULT_COMPRESSED_BUFFER_SIZE = 32 * 1024
)
// err starts out as nil
// we will call deflateEnd when we set err to a value:
// - whatever error is returned by the underlying writer
// - io.EOF if Close was called
type Writer struct {
w io.Writer
out []byte
strm zstream
err error
}
func NewWriter(w io.Writer) *Writer {
z, _ := NewWriterLevelBuffer(w, Z_DEFAULT_COMPRESSION, DEFAULT_COMPRESSED_BUFFER_SIZE)
return z
}
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
return NewWriterLevelBuffer(w, level, DEFAULT_COMPRESSED_BUFFER_SIZE)
}
func NewWriterLevelBuffer(w io.Writer, level, bufferSize int) (*Writer, error) {
z := &Writer{w: w, out: make([]byte, bufferSize)}
if err := z.strm.deflateInit(level); err != nil {
return nil, err
}
return z, nil
}
// write is the main workhorse: it feeds deflate either new data
// or a directive such as a flush, then drains all pending output
// to the underlying writer
func (z *Writer) write(p []byte, flush int) int {
if len(p) == 0 {
z.strm.setInBuf(nil, 0)
} else {
z.strm.setInBuf(p, len(p))
}
// we loop until we don't get a full output buffer
// each loop completely writes the output buffer to the underlying
// writer
for {
// deflate one buffer
z.strm.setOutBuf(z.out, len(z.out))
z.strm.deflate(flush)
// write everything
from := 0
have := len(z.out) - int(z.strm.availOut())
for have > 0 {
var n int
n, z.err = z.w.Write(z.out[from:have])
if z.err != nil {
z.strm.deflateEnd()
return 0
}
from += n
have -= n
}
// a partially filled output buffer means deflate has no more
// pending output, so we are done
if z.strm.availOut() != 0 {
break
}
}
// zlib guarantees that deflate consumes all input when it is
// given enough output space, so leftover input indicates a bug
if z.strm.availIn() != 0 {
panic(fmt.Errorf("cgzip: Unexpected error (2)"))
}
return len(p)
}
func (z *Writer) Write(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
n = z.write(p, Z_NO_FLUSH)
return n, z.err
}
func (z *Writer) Flush() error {
if z.err != nil {
return z.err
}
z.write(nil, Z_SYNC_FLUSH)
return z.err
}
// Calling Close does not close the wrapped io.Writer originally
// passed to NewWriterX.
func (z *Writer) Close() error {
if z.err != nil {
return z.err
}
z.write(nil, Z_FINISH)
if z.err != nil {
return z.err
}
z.strm.deflateEnd()
z.err = io.EOF
return nil
}
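
Putting the Writer API together: pick a level with NewWriterLevel (or take the default via NewWriter), use Flush for a Z_SYNC_FLUSH sync point, and finish with Close. A minimal sketch under the same vendored-path and cgo/zlib assumptions as above:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/youtube/vitess/go/cgzip"
)

func main() {
	var buf bytes.Buffer

	// Z_BEST_SPEED trades compression ratio for throughput;
	// NewWriter would use Z_DEFAULT_COMPRESSION instead.
	w, err := cgzip.NewWriterLevel(&buf, cgzip.Z_BEST_SPEED)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := w.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}

	// Flush (Z_SYNC_FLUSH) makes everything written so far
	// decodable by a reader before the stream is closed.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}

	// Close finishes the stream (Z_FINISH) but leaves buf usable.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len(), "compressed bytes")
}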

Some files were not shown because too many files have changed in this diff.