Update Godeps.

This was a mechanical change:
```
make dep-restore
godep update -goversion
make dep-save
```
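A quick way to double-check the Go version godep recorded after the re-save (a minimal sketch, assuming `jq` is available; it is not part of the make targets above):
```
# Read back the GoVersion field godep wrote; expect "go1.6" after this change.
jq -r '.GoVersion' Godeps/Godeps.json
```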

Signed-off-by: John Sirois <john.sirois@gmail.com>
John Sirois 2016-02-24 13:13:01 -08:00
parent 8046e13683
commit 5896ecff40
445 changed files with 15585 additions and 50884 deletions

Godeps/Godeps.json (generated), 387 lines changed

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/docker/machine",
"GoVersion": "go1.5",
"GoVersion": "go1.6",
"Packages": [
"github.com/docker/machine",
"github.com/docker/machine/cmd",
@@ -24,12 +24,14 @@
"github.com/docker/machine/drivers/vmwarefusion",
"github.com/docker/machine/drivers/vmwarevcloudair",
"github.com/docker/machine/drivers/vmwarevsphere",
"github.com/docker/machine/drivers/vmwarevsphere/errors",
"github.com/docker/machine/its",
"github.com/docker/machine/its/cli",
"github.com/docker/machine/its/thirdparty",
"github.com/docker/machine/libmachine",
"github.com/docker/machine/libmachine/auth",
"github.com/docker/machine/libmachine/bugsnag",
"github.com/docker/machine/libmachine/cert",
"github.com/docker/machine/libmachine/check",
"github.com/docker/machine/libmachine/crashreport",
"github.com/docker/machine/libmachine/drivers",
"github.com/docker/machine/libmachine/drivers/plugin",
"github.com/docker/machine/libmachine/drivers/plugin/localbinary",
@@ -49,37 +51,112 @@
"github.com/docker/machine/libmachine/provider",
"github.com/docker/machine/libmachine/provision",
"github.com/docker/machine/libmachine/provision/pkgaction",
"github.com/docker/machine/libmachine/provision/provisiontest",
"github.com/docker/machine/libmachine/provision/serviceaction",
"github.com/docker/machine/libmachine/shell",
"github.com/docker/machine/libmachine/ssh",
"github.com/docker/machine/libmachine/ssh/sshtest",
"github.com/docker/machine/libmachine/state",
"github.com/docker/machine/libmachine/swarm",
"github.com/docker/machine/libmachine/version",
"github.com/docker/machine/version"
],
"Deps": [
{
"ImportPath": "github.com/bugsnag/bugsnag-go",
"Comment": "v1.0.5-19-g02e9528",
"Rev": "02e952891c52fbcb15f113d90633897355783b6e"
},
{
"ImportPath": "github.com/bugsnag/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/bugsnag/panicwrap",
"Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/clients/imageClient",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/clients/locationClient",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/clients/storageServiceClient",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/core/http",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/core/tls",
"Comment": "v1.1-17-g515f3ec",
"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.0.2",
@@ -120,6 +197,24 @@
"Comment": "v1.0.2",
"Rev": "9d7bc2d6ca2ada0468f705f0e9725ca97f8550c8"
},
{
"ImportPath": "github.com/bugsnag/bugsnag-go",
"Comment": "v1.0.5-19-g02e9528",
"Rev": "02e952891c52fbcb15f113d90633897355783b6e"
},
{
"ImportPath": "github.com/bugsnag/bugsnag-go/errors",
"Comment": "v1.0.5-19-g02e9528",
"Rev": "02e952891c52fbcb15f113d90633897355783b6e"
},
{
"ImportPath": "github.com/bugsnag/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/bugsnag/panicwrap",
"Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27"
},
{
"ImportPath": "github.com/cenkalti/backoff",
"Rev": "9831e1e25c874e0a0601b6dc43641071414eec7a"
@@ -140,15 +235,19 @@
"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
},
{
"ImportPath": "github.com/docker/docker/pkg/units",
"Comment": "v1.5.0",
"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
"ImportPath": "github.com/docker/go-units",
"Comment": "v0.1.0-21-g0bbddae",
"Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b"
},
{
"ImportPath": "github.com/go-ini/ini",
"Comment": "v0-56-g03e0e7d",
"Rev": "03e0e7d51a13a91c765d8d0161246bc14a38001a"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "3c84672111d91bb5ac31719e112f9f7126a0e26e"
},
{
"ImportPath": "github.com/google/go-querystring/query",
"Rev": "30f7a39f4a218feb5325f3aebc60c32a572a8274"
@@ -171,6 +270,101 @@
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/networking/v2/networks",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/networking/v2/ports",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/utils",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/pagination",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper/client",
"Comment": "v1.0.0-558-gce0f487",
"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
},
{
"ImportPath": "github.com/samalba/dockerclient",
"Rev": "f661dd4754aa5c52da85d04b5871ee0e11f4b59c"
@@ -192,6 +386,91 @@
"Comment": "v0.0.2",
"Rev": "66a23eaabc61518f91769939ff541886fe1dceef"
},
{
"ImportPath": "github.com/vmware/govcloudair/types/v56",
"Comment": "v0.0.2",
"Rev": "66a23eaabc61518f91769939ff541886fe1dceef"
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/find",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/guest",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/list",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/object",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/property",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/session",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/task",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/debug",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/methods",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/mo",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/progress",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/soap",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/types",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/xml",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc"
@@ -200,14 +479,38 @@
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc"
},
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "4f2fc6c1e69d41baf187332ee08fbd2b296f21ed"
},
{
"ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "4f2fc6c1e69d41baf187332ee08fbd2b296f21ed"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
},
{
"ImportPath": "golang.org/x/oauth2/google",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
},
{
"ImportPath": "golang.org/x/oauth2/jws",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
},
{
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
},
{
"ImportPath": "golang.org/x/sys/windows/registry",
"Rev": "d9157a9621b69ad1d8d77a1933590c416593f24f"
@@ -224,6 +527,42 @@
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "030d584ade5f79aa2ed0ce067e8f7da50c9a10d5"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "030d584ade5f79aa2ed0ce067e8f7da50c9a10d5"
},
{
"ImportPath": "google.golang.org/appengine",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/app_identity",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/base",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/datastore",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/log",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/modules",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/appengine/internal/remote_api",
"Rev": "6a436539be38c296a8075a871cc536686b458371"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
@@ -231,16 +570,6 @@
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2"
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.3.0-11-g20c009c",
"Rev": "20c009ce9c493f0c714a9fffa5bda5fb84df2b6c"
},
{
"ImportPath": "github.com/docker/go-units",
"Comment": "v0.1.0-21-g0bbddae",
"Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b"
}
]
}

@@ -1,206 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements CGI from the perspective of a child
// process.
package cgi
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
)
// Request returns the HTTP request as represented in the current
// environment. This assumes the current program is being run
// by a web server in a CGI environment.
// The returned Request's Body is populated, if applicable.
func Request() (*http.Request, error) {
r, err := RequestFromMap(envMap(os.Environ()))
if err != nil {
return nil, err
}
if r.ContentLength > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
}
return r, nil
}
func envMap(env []string) map[string]string {
m := make(map[string]string)
for _, kv := range env {
if idx := strings.Index(kv, "="); idx != -1 {
m[kv[:idx]] = kv[idx+1:]
}
}
return m
}
// RequestFromMap creates an http.Request from CGI variables.
// The returned Request's Body field is not populated.
func RequestFromMap(params map[string]string) (*http.Request, error) {
r := new(http.Request)
r.Method = params["REQUEST_METHOD"]
if r.Method == "" {
return nil, errors.New("cgi: no REQUEST_METHOD in environment")
}
r.Proto = params["SERVER_PROTOCOL"]
var ok bool
r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
if !ok {
return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
}
r.Close = true
r.Trailer = http.Header{}
r.Header = http.Header{}
r.Host = params["HTTP_HOST"]
if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
clen, err := strconv.ParseInt(lenstr, 10, 64)
if err != nil {
return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
}
r.ContentLength = clen
}
if ct := params["CONTENT_TYPE"]; ct != "" {
r.Header.Set("Content-Type", ct)
}
// Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
for k, v := range params {
if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" {
continue
}
r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v)
}
// TODO: cookies. parsing them isn't exported, though.
uriStr := params["REQUEST_URI"]
if uriStr == "" {
// Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING.
uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"]
s := params["QUERY_STRING"]
if s != "" {
uriStr += "?" + s
}
}
// There's apparently a de-facto standard for this.
// http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
r.TLS = &tls.ConnectionState{HandshakeComplete: true}
}
if r.Host != "" {
// Hostname is provided, so we can reasonably construct a URL.
rawurl := r.Host + uriStr
if r.TLS == nil {
rawurl = "http://" + rawurl
} else {
rawurl = "https://" + rawurl
}
url, err := url.Parse(rawurl)
if err != nil {
return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
}
r.URL = url
}
// Fallback logic if we don't have a Host header or the URL
// failed to parse
if r.URL == nil {
url, err := url.Parse(uriStr)
if err != nil {
return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
}
r.URL = url
}
// Request.RemoteAddr has its port set by Go's standard http
// server, so we do here too. We don't have one, though, so we
// use a dummy one.
r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
return r, nil
}
// Serve executes the provided Handler on the currently active CGI
// request, if any. If there's no current CGI environment
// an error is returned. The provided handler may be nil to use
// http.DefaultServeMux.
func Serve(handler http.Handler) error {
req, err := Request()
if err != nil {
return err
}
if handler == nil {
handler = http.DefaultServeMux
}
rw := &response{
req: req,
header: make(http.Header),
bufw: bufio.NewWriter(os.Stdout),
}
handler.ServeHTTP(rw, req)
rw.Write(nil) // make sure a response is sent
if err = rw.bufw.Flush(); err != nil {
return err
}
return nil
}
type response struct {
req *http.Request
header http.Header
bufw *bufio.Writer
headerSent bool
}
func (r *response) Flush() {
r.bufw.Flush()
}
func (r *response) Header() http.Header {
return r.header
}
func (r *response) Write(p []byte) (n int, err error) {
if !r.headerSent {
r.WriteHeader(http.StatusOK)
}
return r.bufw.Write(p)
}
func (r *response) WriteHeader(code int) {
if r.headerSent {
// Note: explicitly using Stderr, as Stdout is our HTTP output.
fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
return
}
r.headerSent = true
fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
// Set a default Content-Type
if _, hasType := r.header["Content-Type"]; !hasType {
r.header.Add("Content-Type", "text/html; charset=utf-8")
}
r.header.Write(r.bufw)
r.bufw.WriteString("\r\n")
r.bufw.Flush()
}

@@ -1,131 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for CGI (the child process perspective)
package cgi
import (
"testing"
)
func TestRequest(t *testing.T) {
env := map[string]string{
"SERVER_PROTOCOL": "HTTP/1.1",
"REQUEST_METHOD": "GET",
"HTTP_HOST": "example.com",
"HTTP_REFERER": "elsewhere",
"HTTP_USER_AGENT": "goclient",
"HTTP_FOO_BAR": "baz",
"REQUEST_URI": "/path?a=b",
"CONTENT_LENGTH": "123",
"CONTENT_TYPE": "text/xml",
"REMOTE_ADDR": "5.6.7.8",
}
req, err := RequestFromMap(env)
if err != nil {
t.Fatalf("RequestFromMap: %v", err)
}
if g, e := req.UserAgent(), "goclient"; e != g {
t.Errorf("expected UserAgent %q; got %q", e, g)
}
if g, e := req.Method, "GET"; e != g {
t.Errorf("expected Method %q; got %q", e, g)
}
if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
t.Errorf("expected Content-Type %q; got %q", e, g)
}
if g, e := req.ContentLength, int64(123); e != g {
t.Errorf("expected ContentLength %d; got %d", e, g)
}
if g, e := req.Referer(), "elsewhere"; e != g {
t.Errorf("expected Referer %q; got %q", e, g)
}
if req.Header == nil {
t.Fatalf("unexpected nil Header")
}
if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
t.Errorf("expected Foo-Bar %q; got %q", e, g)
}
if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
t.Errorf("expected URL %q; got %q", e, g)
}
if g, e := req.FormValue("a"), "b"; e != g {
t.Errorf("expected FormValue(a) %q; got %q", e, g)
}
if req.Trailer == nil {
t.Errorf("unexpected nil Trailer")
}
if req.TLS != nil {
t.Errorf("expected nil TLS")
}
if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
t.Errorf("RemoteAddr: got %q; want %q", g, e)
}
}
func TestRequestWithTLS(t *testing.T) {
env := map[string]string{
"SERVER_PROTOCOL": "HTTP/1.1",
"REQUEST_METHOD": "GET",
"HTTP_HOST": "example.com",
"HTTP_REFERER": "elsewhere",
"REQUEST_URI": "/path?a=b",
"CONTENT_TYPE": "text/xml",
"HTTPS": "1",
"REMOTE_ADDR": "5.6.7.8",
}
req, err := RequestFromMap(env)
if err != nil {
t.Fatalf("RequestFromMap: %v", err)
}
if g, e := req.URL.String(), "https://example.com/path?a=b"; e != g {
t.Errorf("expected URL %q; got %q", e, g)
}
if req.TLS == nil {
t.Errorf("expected non-nil TLS")
}
}
func TestRequestWithoutHost(t *testing.T) {
env := map[string]string{
"SERVER_PROTOCOL": "HTTP/1.1",
"HTTP_HOST": "",
"REQUEST_METHOD": "GET",
"REQUEST_URI": "/path?a=b",
"CONTENT_LENGTH": "123",
}
req, err := RequestFromMap(env)
if err != nil {
t.Fatalf("RequestFromMap: %v", err)
}
if req.URL == nil {
t.Fatalf("unexpected nil URL")
}
if g, e := req.URL.String(), "/path?a=b"; e != g {
t.Errorf("URL = %q; want %q", g, e)
}
}
func TestRequestWithoutRequestURI(t *testing.T) {
env := map[string]string{
"SERVER_PROTOCOL": "HTTP/1.1",
"HTTP_HOST": "example.com",
"REQUEST_METHOD": "GET",
"SCRIPT_NAME": "/dir/scriptname",
"PATH_INFO": "/p1/p2",
"QUERY_STRING": "a=1&b=2",
"CONTENT_LENGTH": "123",
}
req, err := RequestFromMap(env)
if err != nil {
t.Fatalf("RequestFromMap: %v", err)
}
if req.URL == nil {
t.Fatalf("unexpected nil URL")
}
if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g {
t.Errorf("URL = %q; want %q", g, e)
}
}

@@ -1,377 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the host side of CGI (being the webserver
// parent process).
// Package cgi implements CGI (Common Gateway Interface) as specified
// in RFC 3875.
//
// Note that using CGI means starting a new process to handle each
// request, which is typically less efficient than using a
// long-running server. This package is intended primarily for
// compatibility with existing systems.
package cgi
import (
"bufio"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var trailingPort = regexp.MustCompile(`:([0-9]+)$`)
var osDefaultInheritEnv = map[string][]string{
"darwin": {"DYLD_LIBRARY_PATH"},
"freebsd": {"LD_LIBRARY_PATH"},
"hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"},
"irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
"linux": {"LD_LIBRARY_PATH"},
"openbsd": {"LD_LIBRARY_PATH"},
"solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
"windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
}
// Handler runs an executable in a subprocess with a CGI environment.
type Handler struct {
Path string // path to the CGI executable
Root string // root URI prefix of handler or empty for "/"
// Dir specifies the CGI executable's working directory.
// If Dir is empty, the base directory of Path is used.
// If Path has no base directory, the current working
// directory is used.
Dir string
Env []string // extra environment variables to set, if any, as "key=value"
InheritEnv []string // environment variables to inherit from host, as "key"
Logger *log.Logger // optional log for errors or nil to use log.Print
Args []string // optional arguments to pass to child process
// PathLocationHandler specifies the root http Handler that
// should handle internal redirects when the CGI process
// returns a Location header value starting with a "/", as
// specified in RFC 3875 § 6.3.2. This will likely be
// http.DefaultServeMux.
//
// If nil, a CGI response with a local URI path is instead sent
// back to the client and not redirected internally.
PathLocationHandler http.Handler
}
// removeLeadingDuplicates remove leading duplicate in environments.
// It's possible to override environment like following.
// cgi.Handler{
// ...
// Env: []string{"SCRIPT_FILENAME=foo.php"},
// }
func removeLeadingDuplicates(env []string) (ret []string) {
n := len(env)
for i := 0; i < n; i++ {
e := env[i]
s := strings.SplitN(e, "=", 2)[0]
found := false
for j := i + 1; j < n; j++ {
if s == strings.SplitN(env[j], "=", 2)[0] {
found = true
break
}
}
if !found {
ret = append(ret, e)
}
}
return
}
func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
root := h.Root
if root == "" {
root = "/"
}
if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
rw.WriteHeader(http.StatusBadRequest)
rw.Write([]byte("Chunked request bodies are not supported by CGI."))
return
}
pathInfo := req.URL.Path
if root != "/" && strings.HasPrefix(pathInfo, root) {
pathInfo = pathInfo[len(root):]
}
port := "80"
if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
port = matches[1]
}
env := []string{
"SERVER_SOFTWARE=go",
"SERVER_NAME=" + req.Host,
"SERVER_PROTOCOL=HTTP/1.1",
"HTTP_HOST=" + req.Host,
"GATEWAY_INTERFACE=CGI/1.1",
"REQUEST_METHOD=" + req.Method,
"QUERY_STRING=" + req.URL.RawQuery,
"REQUEST_URI=" + req.URL.RequestURI(),
"PATH_INFO=" + pathInfo,
"SCRIPT_NAME=" + root,
"SCRIPT_FILENAME=" + h.Path,
"REMOTE_ADDR=" + req.RemoteAddr,
"REMOTE_HOST=" + req.RemoteAddr,
"SERVER_PORT=" + port,
}
if req.TLS != nil {
env = append(env, "HTTPS=on")
}
for k, v := range req.Header {
k = strings.Map(upperCaseAndUnderscore, k)
joinStr := ", "
if k == "COOKIE" {
joinStr = "; "
}
env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr))
}
if req.ContentLength > 0 {
env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
}
if ctype := req.Header.Get("Content-Type"); ctype != "" {
env = append(env, "CONTENT_TYPE="+ctype)
}
if h.Env != nil {
env = append(env, h.Env...)
}
envPath := os.Getenv("PATH")
if envPath == "" {
envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin"
}
env = append(env, "PATH="+envPath)
for _, e := range h.InheritEnv {
if v := os.Getenv(e); v != "" {
env = append(env, e+"="+v)
}
}
for _, e := range osDefaultInheritEnv[runtime.GOOS] {
if v := os.Getenv(e); v != "" {
env = append(env, e+"="+v)
}
}
env = removeLeadingDuplicates(env)
var cwd, path string
if h.Dir != "" {
path = h.Path
cwd = h.Dir
} else {
cwd, path = filepath.Split(h.Path)
}
if cwd == "" {
cwd = "."
}
internalError := func(err error) {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("CGI error: %v", err)
}
cmd := &exec.Cmd{
Path: path,
Args: append([]string{h.Path}, h.Args...),
Dir: cwd,
Env: env,
Stderr: os.Stderr, // for now
}
if req.ContentLength != 0 {
cmd.Stdin = req.Body
}
stdoutRead, err := cmd.StdoutPipe()
if err != nil {
internalError(err)
return
}
err = cmd.Start()
if err != nil {
internalError(err)
return
}
if hook := testHookStartProcess; hook != nil {
hook(cmd.Process)
}
defer cmd.Wait()
defer stdoutRead.Close()
linebody := bufio.NewReaderSize(stdoutRead, 1024)
headers := make(http.Header)
statusCode := 0
headerLines := 0
sawBlankLine := false
for {
line, isPrefix, err := linebody.ReadLine()
if isPrefix {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("cgi: long header line from subprocess.")
return
}
if err == io.EOF {
break
}
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("cgi: error reading headers: %v", err)
return
}
if len(line) == 0 {
sawBlankLine = true
break
}
headerLines++
parts := strings.SplitN(string(line), ":", 2)
if len(parts) < 2 {
h.printf("cgi: bogus header line: %s", string(line))
continue
}
header, val := parts[0], parts[1]
header = strings.TrimSpace(header)
val = strings.TrimSpace(val)
switch {
case header == "Status":
if len(val) < 3 {
h.printf("cgi: bogus status (short): %q", val)
return
}
code, err := strconv.Atoi(val[0:3])
if err != nil {
h.printf("cgi: bogus status: %q", val)
h.printf("cgi: line was %q", line)
return
}
statusCode = code
default:
headers.Add(header, val)
}
}
if headerLines == 0 || !sawBlankLine {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("cgi: no headers")
return
}
if loc := headers.Get("Location"); loc != "" {
if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil {
h.handleInternalRedirect(rw, req, loc)
return
}
if statusCode == 0 {
statusCode = http.StatusFound
}
}
if statusCode == 0 && headers.Get("Content-Type") == "" {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("cgi: missing required Content-Type in headers")
return
}
if statusCode == 0 {
statusCode = http.StatusOK
}
// Copy headers to rw's headers, after we've decided not to
// go into handleInternalRedirect, which won't want its rw
// headers to have been touched.
for k, vv := range headers {
for _, v := range vv {
rw.Header().Add(k, v)
}
}
rw.WriteHeader(statusCode)
_, err = io.Copy(rw, linebody)
if err != nil {
h.printf("cgi: copy error: %v", err)
// And kill the child CGI process so we don't hang on
// the deferred cmd.Wait above if the error was just
// the client (rw) going away. If it was a read error
// (because the child died itself), then the extra
// kill of an already-dead process is harmless (the PID
// won't be reused until the Wait above).
cmd.Process.Kill()
}
}
func (h *Handler) printf(format string, v ...interface{}) {
if h.Logger != nil {
h.Logger.Printf(format, v...)
} else {
log.Printf(format, v...)
}
}
func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) {
url, err := req.URL.Parse(path)
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
h.printf("cgi: error resolving local URI path %q: %v", path, err)
return
}
// TODO: RFC 3875 isn't clear if only GET is supported, but it
// suggests so: "Note that any message-body attached to the
// request (such as for a POST request) may not be available
// to the resource that is the target of the redirect." We
// should do some tests against Apache to see how it handles
// POST, HEAD, etc. Does the internal redirect get the same
// method or just GET? What about incoming headers?
// (e.g. Cookies) Which headers, if any, are copied into the
// second request?
newReq := &http.Request{
Method: "GET",
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: url.Host,
RemoteAddr: req.RemoteAddr,
TLS: req.TLS,
}
h.PathLocationHandler.ServeHTTP(rw, newReq)
}
func upperCaseAndUnderscore(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
return r - ('a' - 'A')
case r == '-':
return '_'
case r == '=':
// Maybe not part of the CGI 'spec' but would mess up
// the environment in any case, as Go represents the
// environment as a slice of "key=value" strings.
return '_'
}
// TODO: other transformations in spec or practice?
return r
}
var testHookStartProcess func(*os.Process) // nil except for some tests

@@ -1,461 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for package cgi
package cgi
import (
"bufio"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
func newRequest(httpreq string) *http.Request {
buf := bufio.NewReader(strings.NewReader(httpreq))
req, err := http.ReadRequest(buf)
if err != nil {
panic("cgi: bogus http request in test: " + httpreq)
}
req.RemoteAddr = "1.2.3.4"
return req
}
func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
rw := httptest.NewRecorder()
req := newRequest(httpreq)
h.ServeHTTP(rw, req)
// Make a map to hold the test map that the CGI returns.
m := make(map[string]string)
m["_body"] = rw.Body.String()
linesRead := 0
readlines:
for {
line, err := rw.Body.ReadString('\n')
switch {
case err == io.EOF:
break readlines
case err != nil:
t.Fatalf("unexpected error reading from CGI: %v", err)
}
linesRead++
trimmedLine := strings.TrimRight(line, "\r\n")
split := strings.SplitN(trimmedLine, "=", 2)
if len(split) != 2 {
t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
len(split), linesRead, line, m)
}
m[split[0]] = split[1]
}
for key, expected := range expectedMap {
got := m[key]
if key == "cwd" {
// For Windows. golang.org/issue/4645.
fi1, _ := os.Stat(got)
fi2, _ := os.Stat(expected)
if os.SameFile(fi1, fi2) {
got = expected
}
}
if got != expected {
t.Errorf("for key %q got %q; expected %q", key, got, expected)
}
}
return rw
}
var cgiTested, cgiWorks bool
func check(t *testing.T) {
if !cgiTested {
cgiTested = true
cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
}
if !cgiWorks {
// No Perl on Windows, needed by test.cgi
// TODO: make the child process be Go, not Perl.
t.Skip("Skipping test: test.cgi failed.")
}
}
func TestCGIBasicGet(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap := map[string]string{
"test": "Hello CGI",
"param-a": "b",
"param-foo": "bar",
"env-GATEWAY_INTERFACE": "CGI/1.1",
"env-HTTP_HOST": "example.com",
"env-PATH_INFO": "",
"env-QUERY_STRING": "foo=bar&a=b",
"env-REMOTE_ADDR": "1.2.3.4",
"env-REMOTE_HOST": "1.2.3.4",
"env-REQUEST_METHOD": "GET",
"env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
"env-SCRIPT_NAME": "/test.cgi",
"env-SERVER_NAME": "example.com",
"env-SERVER_PORT": "80",
"env-SERVER_SOFTWARE": "go",
}
replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
t.Errorf("got a Content-Type of %q; expected %q", got, expected)
}
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
}
}
func TestCGIBasicGetAbsPath(t *testing.T) {
check(t)
pwd, err := os.Getwd()
if err != nil {
t.Fatalf("getwd error: %v", err)
}
h := &Handler{
Path: pwd + "/testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap := map[string]string{
"env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
"env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
"env-SCRIPT_NAME": "/test.cgi",
}
runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestPathInfo(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap := map[string]string{
"param-a": "b",
"env-PATH_INFO": "/extrapath",
"env-QUERY_STRING": "a=b",
"env-REQUEST_URI": "/test.cgi/extrapath?a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
"env-SCRIPT_NAME": "/test.cgi",
}
runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestPathInfoDirRoot(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
Root: "/myscript/",
}
expectedMap := map[string]string{
"env-PATH_INFO": "bar",
"env-QUERY_STRING": "a=b",
"env-REQUEST_URI": "/myscript/bar?a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
"env-SCRIPT_NAME": "/myscript/",
}
runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestDupHeaders(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
}
expectedMap := map[string]string{
"env-REQUEST_URI": "/myscript/bar?a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
"env-HTTP_COOKIE": "nom=NOM; yum=YUM",
"env-HTTP_X_FOO": "val1, val2",
}
runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
"Cookie: nom=NOM\n"+
"Cookie: yum=YUM\n"+
"X-Foo: val1\n"+
"X-Foo: val2\n"+
"Host: example.com\n\n",
expectedMap)
}
func TestPathInfoNoRoot(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
Root: "",
}
expectedMap := map[string]string{
"env-PATH_INFO": "/bar",
"env-QUERY_STRING": "a=b",
"env-REQUEST_URI": "/bar?a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
"env-SCRIPT_NAME": "/",
}
runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestCGIBasicPost(t *testing.T) {
check(t)
postReq := `POST /test.cgi?a=b HTTP/1.0
Host: example.com
Content-Type: application/x-www-form-urlencoded
Content-Length: 15
postfoo=postbar`
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap := map[string]string{
"test": "Hello CGI",
"param-postfoo": "postbar",
"env-REQUEST_METHOD": "POST",
"env-CONTENT_LENGTH": "15",
"env-REQUEST_URI": "/test.cgi?a=b",
}
runCgiTest(t, h, postReq, expectedMap)
}
func chunk(s string) string {
return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}
// The CGI spec doesn't allow chunked requests.
func TestCGIPostChunked(t *testing.T) {
check(t)
postReq := `POST /test.cgi?a=b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Transfer-Encoding: chunked
` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap := map[string]string{}
resp := runCgiTest(t, h, postReq, expectedMap)
if got, expected := resp.Code, http.StatusBadRequest; got != expected {
t.Fatalf("Expected %v response code from chunked request body; got %d",
expected, got)
}
}
func TestRedirect(t *testing.T) {
check(t)
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
if e, g := 302, rec.Code; e != g {
t.Errorf("expected status code %d; got %d", e, g)
}
if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g {
t.Errorf("expected Location header of %q; got %q", e, g)
}
}
func TestInternalRedirect(t *testing.T) {
check(t)
baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
})
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
PathLocationHandler: baseHandler,
}
expectedMap := map[string]string{
"basepath": "/foo",
"remoteaddr": "1.2.3.4",
}
runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
// TestCopyError tests that we kill the process if there's an error copying
// its output. (for example, from the client having gone away)
func TestCopyError(t *testing.T) {
check(t)
if runtime.GOOS == "windows" {
t.Skipf("skipping test on %q", runtime.GOOS)
}
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
ts := httptest.NewServer(h)
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
err = req.Write(conn)
if err != nil {
t.Fatalf("Write: %v", err)
}
res, err := http.ReadResponse(bufio.NewReader(conn), req)
if err != nil {
t.Fatalf("ReadResponse: %v", err)
}
pidstr := res.Header.Get("X-CGI-Pid")
if pidstr == "" {
t.Fatalf("expected an X-CGI-Pid header in response")
}
pid, err := strconv.Atoi(pidstr)
if err != nil {
t.Fatalf("invalid X-CGI-Pid value")
}
var buf [5000]byte
n, err := io.ReadFull(res.Body, buf[:])
if err != nil {
t.Fatalf("ReadFull: %d bytes, %v", n, err)
}
childRunning := func() bool {
return isProcessRunning(t, pid)
}
if !childRunning() {
t.Fatalf("pre-conn.Close, expected child to be running")
}
conn.Close()
tries := 0
for tries < 25 && childRunning() {
time.Sleep(50 * time.Millisecond * time.Duration(tries))
tries++
}
if childRunning() {
t.Fatalf("post-conn.Close, expected child to be gone")
}
}
func TestDirUnix(t *testing.T) {
check(t)
if runtime.GOOS == "windows" {
t.Skipf("skipping test on %q", runtime.GOOS)
}
cwd, _ := os.Getwd()
h := &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
Dir: cwd,
}
expectedMap := map[string]string{
"cwd": cwd,
}
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
cwd, _ = os.Getwd()
cwd = filepath.Join(cwd, "testdata")
h = &Handler{
Path: "testdata/test.cgi",
Root: "/test.cgi",
}
expectedMap = map[string]string{
"cwd": cwd,
}
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestDirWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("Skipping windows specific test.")
}
cgifile, _ := filepath.Abs("testdata/test.cgi")
var perl string
var err error
perl, err = exec.LookPath("perl")
if err != nil {
t.Skip("Skipping test: perl not found.")
}
perl, _ = filepath.Abs(perl)
cwd, _ := os.Getwd()
h := &Handler{
Path: perl,
Root: "/test.cgi",
Dir: cwd,
Args: []string{cgifile},
Env: []string{"SCRIPT_FILENAME=" + cgifile},
}
expectedMap := map[string]string{
"cwd": cwd,
}
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
// If not specify Dir on windows, working directory should be
// base directory of perl.
cwd, _ = filepath.Split(perl)
if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
cwd = cwd[:len(cwd)-1]
}
h = &Handler{
Path: perl,
Root: "/test.cgi",
Args: []string{cgifile},
Env: []string{"SCRIPT_FILENAME=" + cgifile},
}
expectedMap = map[string]string{
"cwd": cwd,
}
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
func TestEnvOverride(t *testing.T) {
cgifile, _ := filepath.Abs("testdata/test.cgi")
var perl string
var err error
perl, err = exec.LookPath("perl")
if err != nil {
t.Skipf("Skipping test: perl not found.")
}
perl, _ = filepath.Abs(perl)
cwd, _ := os.Getwd()
h := &Handler{
Path: perl,
Root: "/test.cgi",
Dir: cwd,
Args: []string{cgifile},
Env: []string{
"SCRIPT_FILENAME=" + cgifile,
"REQUEST_URI=/foo/bar"},
}
expectedMap := map[string]string{
"cwd": cwd,
"env-SCRIPT_FILENAME": cgifile,
"env-REQUEST_URI": "/foo/bar",
}
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
}

@@ -1,228 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests a Go CGI program running under a Go CGI host process.
// Further, the two programs are the same binary, just checking
// their environment to figure out what mode to run in.
package cgi
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"runtime"
"testing"
"time"
)
// This test is a CGI host (testing host.go) that runs its own binary
// as a child process testing the other half of CGI (child.go).
func TestHostingOurselves(t *testing.T) {
if runtime.GOOS == "nacl" {
t.Skip("skipping on nacl")
}
h := &Handler{
Path: os.Args[0],
Root: "/test.go",
Args: []string{"-test.run=TestBeChildCGIProcess"},
}
expectedMap := map[string]string{
"test": "Hello CGI-in-CGI",
"param-a": "b",
"param-foo": "bar",
"env-GATEWAY_INTERFACE": "CGI/1.1",
"env-HTTP_HOST": "example.com",
"env-PATH_INFO": "",
"env-QUERY_STRING": "foo=bar&a=b",
"env-REMOTE_ADDR": "1.2.3.4",
"env-REMOTE_HOST": "1.2.3.4",
"env-REQUEST_METHOD": "GET",
"env-REQUEST_URI": "/test.go?foo=bar&a=b",
"env-SCRIPT_FILENAME": os.Args[0],
"env-SCRIPT_NAME": "/test.go",
"env-SERVER_NAME": "example.com",
"env-SERVER_PORT": "80",
"env-SERVER_SOFTWARE": "go",
}
replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
t.Errorf("got a Content-Type of %q; expected %q", got, expected)
}
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
}
}
type customWriterRecorder struct {
w io.Writer
*httptest.ResponseRecorder
}
func (r *customWriterRecorder) Write(p []byte) (n int, err error) {
return r.w.Write(p)
}
type limitWriter struct {
w io.Writer
n int
}
func (w *limitWriter) Write(p []byte) (n int, err error) {
if len(p) > w.n {
p = p[:w.n]
}
if len(p) > 0 {
n, err = w.w.Write(p)
w.n -= n
}
if w.n == 0 {
err = errors.New("past write limit")
}
return
}
// If there's an error copying the child's output to the parent, test
// that we kill the child.
func TestKillChildAfterCopyError(t *testing.T) {
if runtime.GOOS == "nacl" {
t.Skip("skipping on nacl")
}
defer func() { testHookStartProcess = nil }()
proc := make(chan *os.Process, 1)
testHookStartProcess = func(p *os.Process) {
proc <- p
}
h := &Handler{
Path: os.Args[0],
Root: "/test.go",
Args: []string{"-test.run=TestBeChildCGIProcess"},
}
req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil)
rec := httptest.NewRecorder()
var out bytes.Buffer
const writeLen = 50 << 10
rw := &customWriterRecorder{&limitWriter{&out, writeLen}, rec}
donec := make(chan bool, 1)
go func() {
h.ServeHTTP(rw, req)
donec <- true
}()
select {
case <-donec:
if out.Len() != writeLen || out.Bytes()[0] != 'a' {
t.Errorf("unexpected output: %q", out.Bytes())
}
case <-time.After(5 * time.Second):
t.Errorf("timeout. ServeHTTP hung and didn't kill the child process?")
select {
case p := <-proc:
p.Kill()
t.Logf("killed process")
default:
t.Logf("didn't kill process")
}
}
}
// Test that a child handler writing only headers works.
// golang.org/issue/7196
func TestChildOnlyHeaders(t *testing.T) {
if runtime.GOOS == "nacl" {
t.Skip("skipping on nacl")
}
h := &Handler{
Path: os.Args[0],
Root: "/test.go",
Args: []string{"-test.run=TestBeChildCGIProcess"},
}
expectedMap := map[string]string{
"_body": "",
}
replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap)
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
}
}
// golang.org/issue/7198
func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") }
func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") }
func Test500WithEmptyHeaders(t *testing.T) { want500Test(t, "/empty-headers") }
func want500Test(t *testing.T, path string) {
h := &Handler{
Path: os.Args[0],
Root: "/test.go",
Args: []string{"-test.run=TestBeChildCGIProcess"},
}
expectedMap := map[string]string{
"_body": "",
}
replay := runCgiTest(t, h, "GET "+path+" HTTP/1.0\nHost: example.com\n\n", expectedMap)
if replay.Code != 500 {
t.Errorf("Got code %d; want 500", replay.Code)
}
}
type neverEnding byte
func (b neverEnding) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = byte(b)
}
return len(p), nil
}
// Note: not actually a test.
func TestBeChildCGIProcess(t *testing.T) {
if os.Getenv("REQUEST_METHOD") == "" {
// Not in a CGI environment; skipping test.
return
}
switch os.Getenv("REQUEST_URI") {
case "/immediate-disconnect":
os.Exit(0)
case "/no-content-type":
fmt.Printf("Content-Length: 6\n\nHello\n")
os.Exit(0)
case "/empty-headers":
fmt.Printf("\nHello")
os.Exit(0)
}
Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("X-Test-Header", "X-Test-Value")
req.ParseForm()
if req.FormValue("no-body") == "1" {
return
}
if req.FormValue("write-forever") == "1" {
io.Copy(rw, neverEnding('a'))
for {
time.Sleep(5 * time.Second) // hang forever, until killed
}
}
fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
for k, vv := range req.Form {
for _, v := range vv {
fmt.Fprintf(rw, "param-%s=%s\n", k, v)
}
}
for _, kv := range os.Environ() {
fmt.Fprintf(rw, "env-%s\n", kv)
}
}))
os.Exit(0)
}

@@ -1,18 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build plan9
package cgi
import (
"os"
"strconv"
"testing"
)
func isProcessRunning(t *testing.T, pid int) bool {
_, err := os.Stat("/proc/" + strconv.Itoa(pid))
return err == nil
}

@@ -1,21 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package cgi
import (
"os"
"syscall"
"testing"
)
func isProcessRunning(t *testing.T, pid int) bool {
p, err := os.FindProcess(pid)
if err != nil {
return false
}
return p.Signal(syscall.Signal(0)) == nil
}

@@ -1,159 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code is duplicated in net/http and net/http/httputil.
// Please make any changes in both files.
package http
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
)
func TestChunk(t *testing.T) {
var b bytes.Buffer
w := newChunkedWriter(&b)
const chunk1 = "hello, "
const chunk2 = "world! 0123456789abcdef"
w.Write([]byte(chunk1))
w.Write([]byte(chunk2))
w.Close()
if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
t.Fatalf("chunk writer wrote %q; want %q", g, e)
}
r := newChunkedReader(&b)
data, err := ioutil.ReadAll(r)
if err != nil {
t.Logf(`data: "%s"`, data)
t.Fatalf("ReadAll from reader: %v", err)
}
if g, e := string(data), chunk1+chunk2; g != e {
t.Errorf("chunk reader read %q; want %q", g, e)
}
}
func TestChunkReadMultiple(t *testing.T) {
// Bunch of small chunks, all read together.
{
var b bytes.Buffer
w := newChunkedWriter(&b)
w.Write([]byte("foo"))
w.Write([]byte("bar"))
w.Close()
r := newChunkedReader(&b)
buf := make([]byte, 10)
n, err := r.Read(buf)
if n != 6 || err != io.EOF {
t.Errorf("Read = %d, %v; want 6, EOF", n, err)
}
buf = buf[:n]
if string(buf) != "foobar" {
t.Errorf("Read = %q; want %q", buf, "foobar")
}
}
// One big chunk followed by a little chunk, but the small bufio.Reader size
// should prevent the second chunk header from being read.
{
var b bytes.Buffer
w := newChunkedWriter(&b)
// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
// the same as the bufio ReaderSize below (the minimum), so even
// though we're going to try to Read with a buffer larger enough to also
// receive "foo", the second chunk header won't be read yet.
const fillBufChunk = "0123456789a"
const shortChunk = "foo"
w.Write([]byte(fillBufChunk))
w.Write([]byte(shortChunk))
w.Close()
r := newChunkedReader(bufio.NewReaderSize(&b, 16))
buf := make([]byte, len(fillBufChunk)+len(shortChunk))
n, err := r.Read(buf)
if n != len(fillBufChunk) || err != nil {
t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
}
buf = buf[:n]
if string(buf) != fillBufChunk {
t.Errorf("Read = %q; want %q", buf, fillBufChunk)
}
n, err = r.Read(buf)
if n != len(shortChunk) || err != io.EOF {
t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
}
}
// And test that we see an EOF chunk, even though our buffer is already full:
{
r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
buf := make([]byte, 3)
n, err := r.Read(buf)
if n != 3 || err != io.EOF {
t.Errorf("Read = %d, %v; want 3, EOF", n, err)
}
if string(buf) != "foo" {
t.Errorf("buf = %q; want foo", buf)
}
}
}
func TestChunkReaderAllocs(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
var buf bytes.Buffer
w := newChunkedWriter(&buf)
a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
w.Write(a)
w.Write(b)
w.Write(c)
w.Close()
readBuf := make([]byte, len(a)+len(b)+len(c)+1)
byter := bytes.NewReader(buf.Bytes())
bufr := bufio.NewReader(byter)
mallocs := testing.AllocsPerRun(100, func() {
byter.Seek(0, 0)
bufr.Reset(byter)
r := newChunkedReader(bufr)
n, err := io.ReadFull(r, readBuf)
if n != len(readBuf)-1 {
t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
}
if err != io.ErrUnexpectedEOF {
t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
}
})
if mallocs > 1.5 {
t.Errorf("mallocs = %v; want 1", mallocs)
}
}
func TestParseHexUint(t *testing.T) {
for i := uint64(0); i <= 1234; i++ {
line := []byte(fmt.Sprintf("%x", i))
got, err := parseHexUint(line)
if err != nil {
t.Fatalf("on %d: %v", i, err)
}
if got != i {
t.Errorf("for input %q = %d; want %d", line, got, i)
}
}
_, err := parseHexUint([]byte("bogus"))
if err == nil {
t.Error("expected error on bogus input")
}
}

File diff suppressed because it is too large.

@@ -1,380 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"encoding/json"
"fmt"
"log"
"os"
"reflect"
"strings"
"testing"
"time"
)
var writeSetCookiesTests = []struct {
Cookie *Cookie
Raw string
}{
{
&Cookie{Name: "cookie-1", Value: "v$1"},
"cookie-1=v$1",
},
{
&Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
"cookie-2=two; Max-Age=3600",
},
{
&Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
"cookie-3=three; Domain=example.com",
},
{
&Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
"cookie-4=four; Path=/restricted/",
},
{
&Cookie{Name: "cookie-5", Value: "five", Domain: "wrong;bad.abc"},
"cookie-5=five",
},
{
&Cookie{Name: "cookie-6", Value: "six", Domain: "bad-.abc"},
"cookie-6=six",
},
{
&Cookie{Name: "cookie-7", Value: "seven", Domain: "127.0.0.1"},
"cookie-7=seven; Domain=127.0.0.1",
},
{
&Cookie{Name: "cookie-8", Value: "eight", Domain: "::1"},
"cookie-8=eight",
},
// The "special" cookies have values containing commas or spaces which
// are disallowed by RFC 6265 but are common in the wild.
{
&Cookie{Name: "special-1", Value: "a z"},
`special-1=a z`,
},
{
&Cookie{Name: "special-2", Value: " z"},
`special-2=" z"`,
},
{
&Cookie{Name: "special-3", Value: "a "},
`special-3="a "`,
},
{
&Cookie{Name: "special-4", Value: " "},
`special-4=" "`,
},
{
&Cookie{Name: "special-5", Value: "a,z"},
`special-5=a,z`,
},
{
&Cookie{Name: "special-6", Value: ",z"},
`special-6=",z"`,
},
{
&Cookie{Name: "special-7", Value: "a,"},
`special-7="a,"`,
},
{
&Cookie{Name: "special-8", Value: ","},
`special-8=","`,
},
{
&Cookie{Name: "empty-value", Value: ""},
`empty-value=`,
},
}
func TestWriteSetCookies(t *testing.T) {
defer log.SetOutput(os.Stderr)
var logbuf bytes.Buffer
log.SetOutput(&logbuf)
for i, tt := range writeSetCookiesTests {
if g, e := tt.Cookie.String(), tt.Raw; g != e {
t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g)
continue
}
}
if got, sub := logbuf.String(), "dropping domain attribute"; !strings.Contains(got, sub) {
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
}
}
type headerOnlyResponseWriter Header
func (ho headerOnlyResponseWriter) Header() Header {
return Header(ho)
}
func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
panic("NOIMPL")
}
func (ho headerOnlyResponseWriter) WriteHeader(int) {
panic("NOIMPL")
}
func TestSetCookie(t *testing.T) {
m := make(Header)
SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
if l := len(m["Set-Cookie"]); l != 2 {
t.Fatalf("expected %d cookies, got %d", 2, l)
}
if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
t.Errorf("cookie #1: want %q, got %q", e, g)
}
if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
t.Errorf("cookie #2: want %q, got %q", e, g)
}
}
var addCookieTests = []struct {
Cookies []*Cookie
Raw string
}{
{
[]*Cookie{},
"",
},
{
[]*Cookie{{Name: "cookie-1", Value: "v$1"}},
"cookie-1=v$1",
},
{
[]*Cookie{
{Name: "cookie-1", Value: "v$1"},
{Name: "cookie-2", Value: "v$2"},
{Name: "cookie-3", Value: "v$3"},
},
"cookie-1=v$1; cookie-2=v$2; cookie-3=v$3",
},
}
func TestAddCookie(t *testing.T) {
for i, tt := range addCookieTests {
req, _ := NewRequest("GET", "http://example.com/", nil)
for _, c := range tt.Cookies {
req.AddCookie(c)
}
if g := req.Header.Get("Cookie"); g != tt.Raw {
t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g)
continue
}
}
}
var readSetCookiesTests = []struct {
Header Header
Cookies []*Cookie
}{
{
Header{"Set-Cookie": {"Cookie-1=v$1"}},
[]*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
},
{
Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}},
[]*Cookie{{
Name: "NID",
Value: "99=YsDT5i3E-CXax-",
Path: "/",
Domain: ".google.ch",
HttpOnly: true,
Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC),
RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT",
Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
}},
},
{
Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
[]*Cookie{{
Name: ".ASPXAUTH",
Value: "7E3AA",
Path: "/",
Expires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC),
RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT",
HttpOnly: true,
Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly",
}},
},
{
Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}},
[]*Cookie{{
Name: "ASP.NET_SessionId",
Value: "foo",
Path: "/",
HttpOnly: true,
Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly",
}},
},
// Make sure we can properly read back the Set-Cookie headers we create
// for values containing spaces or commas:
{
Header{"Set-Cookie": {`special-1=a z`}},
[]*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}},
},
{
Header{"Set-Cookie": {`special-2=" z"`}},
[]*Cookie{{Name: "special-2", Value: " z", Raw: `special-2=" z"`}},
},
{
Header{"Set-Cookie": {`special-3="a "`}},
[]*Cookie{{Name: "special-3", Value: "a ", Raw: `special-3="a "`}},
},
{
Header{"Set-Cookie": {`special-4=" "`}},
[]*Cookie{{Name: "special-4", Value: " ", Raw: `special-4=" "`}},
},
{
Header{"Set-Cookie": {`special-5=a,z`}},
[]*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}},
},
{
Header{"Set-Cookie": {`special-6=",z"`}},
[]*Cookie{{Name: "special-6", Value: ",z", Raw: `special-6=",z"`}},
},
{
Header{"Set-Cookie": {`special-7=a,`}},
[]*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}},
},
{
Header{"Set-Cookie": {`special-8=","`}},
[]*Cookie{{Name: "special-8", Value: ",", Raw: `special-8=","`}},
},
// TODO(bradfitz): users have reported seeing this in the
// wild, but do browsers handle it? RFC 6265 just says "don't
// do that" (section 3) and then never mentions header folding
// again.
// Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
}
func toJSON(v interface{}) string {
b, err := json.Marshal(v)
if err != nil {
return fmt.Sprintf("%#v", v)
}
return string(b)
}
func TestReadSetCookies(t *testing.T) {
for i, tt := range readSetCookiesTests {
for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input
c := readSetCookies(tt.Header)
if !reflect.DeepEqual(c, tt.Cookies) {
t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
continue
}
}
}
}
var readCookiesTests = []struct {
Header Header
Filter string
Cookies []*Cookie
}{
{
Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
"",
[]*Cookie{
{Name: "Cookie-1", Value: "v$1"},
{Name: "c2", Value: "v2"},
},
},
{
Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
"c2",
[]*Cookie{
{Name: "c2", Value: "v2"},
},
},
{
Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
"",
[]*Cookie{
{Name: "Cookie-1", Value: "v$1"},
{Name: "c2", Value: "v2"},
},
},
{
Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
"c2",
[]*Cookie{
{Name: "c2", Value: "v2"},
},
},
}
func TestReadCookies(t *testing.T) {
for i, tt := range readCookiesTests {
for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input
c := readCookies(tt.Header, tt.Filter)
if !reflect.DeepEqual(c, tt.Cookies) {
t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies))
continue
}
}
}
}
func TestCookieSanitizeValue(t *testing.T) {
defer log.SetOutput(os.Stderr)
var logbuf bytes.Buffer
log.SetOutput(&logbuf)
tests := []struct {
in, want string
}{
{"foo", "foo"},
{"foo;bar", "foobar"},
{"foo\\bar", "foobar"},
{"foo\"bar", "foobar"},
{"\x00\x7e\x7f\x80", "\x7e"},
{`"withquotes"`, "withquotes"},
{"a z", "a z"},
{" z", `" z"`},
{"a ", `"a "`},
}
for _, tt := range tests {
if got := sanitizeCookieValue(tt.in); got != tt.want {
t.Errorf("sanitizeCookieValue(%q) = %q; want %q", tt.in, got, tt.want)
}
}
if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) {
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
}
}
func TestCookieSanitizePath(t *testing.T) {
defer log.SetOutput(os.Stderr)
var logbuf bytes.Buffer
log.SetOutput(&logbuf)
tests := []struct {
in, want string
}{
{"/path", "/path"},
{"/path with space/", "/path with space/"},
{"/just;no;semicolon\x00orstuff/", "/justnosemicolonorstuff/"},
}
for _, tt := range tests {
if got := sanitizeCookiePath(tt.in); got != tt.want {
t.Errorf("sanitizeCookiePath(%q) = %q; want %q", tt.in, got, tt.want)
}
}
if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) {
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
}
}
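
The quoting rules exercised by the tables above are visible through the exported net/http API. A minimal standalone sketch (outside this vendored tree, assuming only the stock net/http package):

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Values with leading/trailing spaces or commas are quoted on output,
	// mirroring the "special" cases in writeSetCookiesTests above.
	for _, c := range []*http.Cookie{
		{Name: "plain", Value: "v1", Path: "/restricted/"},
		{Name: "spaced", Value: " z"},
		{Name: "comma", Value: "a,"},
	} {
		fmt.Println(c.String())
	}
}
```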


@ -1,497 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cookiejar implements an in-memory RFC 6265-compliant http.CookieJar.
package cookiejar
import (
"errors"
"fmt"
"net"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
)
// PublicSuffixList provides the public suffix of a domain. For example:
// - the public suffix of "example.com" is "com",
// - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and
// - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us".
//
// Implementations of PublicSuffixList must be safe for concurrent use by
// multiple goroutines.
//
// An implementation that always returns "" is valid and may be useful for
// testing but it is not secure: it means that the HTTP server for foo.com can
// set a cookie for bar.com.
//
// A public suffix list implementation is in the package
// code.google.com/p/go.net/publicsuffix.
type PublicSuffixList interface {
// PublicSuffix returns the public suffix of domain.
//
// TODO: specify which of the caller and callee is responsible for IP
// addresses, for leading and trailing dots, for case sensitivity, and
// for IDN/Punycode.
PublicSuffix(domain string) string
// String returns a description of the source of this public suffix
// list. The description will typically contain something like a time
// stamp or version number.
String() string
}
// Options are the options for creating a new Jar.
type Options struct {
// PublicSuffixList is the public suffix list that determines whether
// an HTTP server can set a cookie for a domain.
//
// A nil value is valid and may be useful for testing but it is not
// secure: it means that the HTTP server for foo.co.uk can set a cookie
// for bar.co.uk.
PublicSuffixList PublicSuffixList
}
// Jar implements the http.CookieJar interface from the net/http package.
type Jar struct {
psList PublicSuffixList
// mu locks the remaining fields.
mu sync.Mutex
// entries is a set of entries, keyed by their eTLD+1 and subkeyed by
// their name/domain/path.
entries map[string]map[string]entry
// nextSeqNum is the next sequence number assigned to a new cookie
// created by SetCookies.
nextSeqNum uint64
}
// New returns a new cookie jar. A nil *Options is equivalent to a zero
// Options.
func New(o *Options) (*Jar, error) {
jar := &Jar{
entries: make(map[string]map[string]entry),
}
if o != nil {
jar.psList = o.PublicSuffixList
}
return jar, nil
}
// entry is the internal representation of a cookie.
//
// This struct type is not used outside of this package per se, but the exported
// fields are those of RFC 6265.
type entry struct {
Name string
Value string
Domain string
Path string
Secure bool
HttpOnly bool
Persistent bool
HostOnly bool
Expires time.Time
Creation time.Time
LastAccess time.Time
// seqNum is a sequence number so that Cookies returns cookies in a
// deterministic order, even for cookies that have equal Path length and
// equal Creation time. This simplifies testing.
seqNum uint64
}
// id returns the domain;path;name triple of e as an id.
func (e *entry) id() string {
return fmt.Sprintf("%s;%s;%s", e.Domain, e.Path, e.Name)
}
// shouldSend determines whether e's cookie qualifies to be included in a
// request to host/path. It is the caller's responsibility to check if the
// cookie is expired.
func (e *entry) shouldSend(https bool, host, path string) bool {
return e.domainMatch(host) && e.pathMatch(path) && (https || !e.Secure)
}
// domainMatch implements "domain-match" of RFC 6265 section 5.1.3.
func (e *entry) domainMatch(host string) bool {
if e.Domain == host {
return true
}
return !e.HostOnly && hasDotSuffix(host, e.Domain)
}
// pathMatch implements "path-match" according to RFC 6265 section 5.1.4.
func (e *entry) pathMatch(requestPath string) bool {
if requestPath == e.Path {
return true
}
if strings.HasPrefix(requestPath, e.Path) {
if e.Path[len(e.Path)-1] == '/' {
return true // The "/any/" matches "/any/path" case.
} else if requestPath[len(e.Path)] == '/' {
return true // The "/any" matches "/any/path" case.
}
}
return false
}
// hasDotSuffix reports whether s ends in "."+suffix.
func hasDotSuffix(s, suffix string) bool {
return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix
}
// byPathLength is a []entry sort.Interface that sorts according to RFC 6265
// section 5.4 point 2: by longest path and then by earliest creation time.
type byPathLength []entry
func (s byPathLength) Len() int { return len(s) }
func (s byPathLength) Less(i, j int) bool {
if len(s[i].Path) != len(s[j].Path) {
return len(s[i].Path) > len(s[j].Path)
}
if !s[i].Creation.Equal(s[j].Creation) {
return s[i].Creation.Before(s[j].Creation)
}
return s[i].seqNum < s[j].seqNum
}
func (s byPathLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Cookies implements the Cookies method of the http.CookieJar interface.
//
// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) {
return j.cookies(u, time.Now())
}
// cookies is like Cookies but takes the current time as a parameter.
func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) {
if u.Scheme != "http" && u.Scheme != "https" {
return cookies
}
host, err := canonicalHost(u.Host)
if err != nil {
return cookies
}
key := jarKey(host, j.psList)
j.mu.Lock()
defer j.mu.Unlock()
submap := j.entries[key]
if submap == nil {
return cookies
}
https := u.Scheme == "https"
path := u.Path
if path == "" {
path = "/"
}
modified := false
var selected []entry
for id, e := range submap {
if e.Persistent && !e.Expires.After(now) {
delete(submap, id)
modified = true
continue
}
if !e.shouldSend(https, host, path) {
continue
}
e.LastAccess = now
submap[id] = e
selected = append(selected, e)
modified = true
}
if modified {
if len(submap) == 0 {
delete(j.entries, key)
} else {
j.entries[key] = submap
}
}
sort.Sort(byPathLength(selected))
for _, e := range selected {
cookies = append(cookies, &http.Cookie{Name: e.Name, Value: e.Value})
}
return cookies
}
// SetCookies implements the SetCookies method of the http.CookieJar interface.
//
// It does nothing if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.setCookies(u, cookies, time.Now())
}
// setCookies is like SetCookies but takes the current time as a parameter.
func (j *Jar) setCookies(u *url.URL, cookies []*http.Cookie, now time.Time) {
if len(cookies) == 0 {
return
}
if u.Scheme != "http" && u.Scheme != "https" {
return
}
host, err := canonicalHost(u.Host)
if err != nil {
return
}
key := jarKey(host, j.psList)
defPath := defaultPath(u.Path)
j.mu.Lock()
defer j.mu.Unlock()
submap := j.entries[key]
modified := false
for _, cookie := range cookies {
e, remove, err := j.newEntry(cookie, now, defPath, host)
if err != nil {
continue
}
id := e.id()
if remove {
if submap != nil {
if _, ok := submap[id]; ok {
delete(submap, id)
modified = true
}
}
continue
}
if submap == nil {
submap = make(map[string]entry)
}
if old, ok := submap[id]; ok {
e.Creation = old.Creation
e.seqNum = old.seqNum
} else {
e.Creation = now
e.seqNum = j.nextSeqNum
j.nextSeqNum++
}
e.LastAccess = now
submap[id] = e
modified = true
}
if modified {
if len(submap) == 0 {
delete(j.entries, key)
} else {
j.entries[key] = submap
}
}
}
// canonicalHost strips port from host if present and returns the canonicalized
// host name.
func canonicalHost(host string) (string, error) {
var err error
host = strings.ToLower(host)
if hasPort(host) {
host, _, err = net.SplitHostPort(host)
if err != nil {
return "", err
}
}
if strings.HasSuffix(host, ".") {
// Strip trailing dot from fully qualified domain names.
host = host[:len(host)-1]
}
return toASCII(host)
}
// hasPort reports whether host contains a port number. host may be a host
// name, an IPv4 or an IPv6 address.
func hasPort(host string) bool {
colons := strings.Count(host, ":")
if colons == 0 {
return false
}
if colons == 1 {
return true
}
return host[0] == '[' && strings.Contains(host, "]:")
}
// jarKey returns the key to use for a jar.
func jarKey(host string, psl PublicSuffixList) string {
if isIP(host) {
return host
}
var i int
if psl == nil {
i = strings.LastIndex(host, ".")
if i == -1 {
return host
}
} else {
suffix := psl.PublicSuffix(host)
if suffix == host {
return host
}
i = len(host) - len(suffix)
if i <= 0 || host[i-1] != '.' {
// The provided public suffix list psl is broken.
// Storing cookies under host is a safe stopgap.
return host
}
}
prevDot := strings.LastIndex(host[:i-1], ".")
return host[prevDot+1:]
}
// isIP reports whether host is an IP address.
func isIP(host string) bool {
return net.ParseIP(host) != nil
}
// defaultPath returns the directory part of a URL's path according to
// RFC 6265 section 5.1.4.
func defaultPath(path string) string {
if len(path) == 0 || path[0] != '/' {
return "/" // Path is empty or malformed.
}
i := strings.LastIndex(path, "/") // Path starts with "/", so i != -1.
if i == 0 {
return "/" // Path has the form "/abc".
}
return path[:i] // Path is either of form "/abc/xyz" or "/abc/xyz/".
}
// newEntry creates an entry from a http.Cookie c. now is the current time and
// is compared to c.Expires to determine deletion of c. defPath and host are the
// default-path and the canonical host name of the URL c was received from.
//
// remove records whether the jar should delete this cookie, as it has already
// expired with respect to now. In this case, e may be incomplete, but it will
// be valid to call e.id (which depends on e's Name, Domain and Path).
//
// A malformed c.Domain will result in an error.
func (j *Jar) newEntry(c *http.Cookie, now time.Time, defPath, host string) (e entry, remove bool, err error) {
e.Name = c.Name
if c.Path == "" || c.Path[0] != '/' {
e.Path = defPath
} else {
e.Path = c.Path
}
e.Domain, e.HostOnly, err = j.domainAndType(host, c.Domain)
if err != nil {
return e, false, err
}
// MaxAge takes precedence over Expires.
if c.MaxAge < 0 {
return e, true, nil
} else if c.MaxAge > 0 {
e.Expires = now.Add(time.Duration(c.MaxAge) * time.Second)
e.Persistent = true
} else {
if c.Expires.IsZero() {
e.Expires = endOfTime
e.Persistent = false
} else {
if !c.Expires.After(now) {
return e, true, nil
}
e.Expires = c.Expires
e.Persistent = true
}
}
e.Value = c.Value
e.Secure = c.Secure
e.HttpOnly = c.HttpOnly
return e, false, nil
}
var (
errIllegalDomain = errors.New("cookiejar: illegal cookie domain attribute")
errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute")
errNoHostname = errors.New("cookiejar: no host name available (IP only)")
)
// endOfTime is the time when session (non-persistent) cookies expire.
// This instant is representable in most date/time formats (not just
// Go's time.Time) and should be far enough in the future.
var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC)
// domainAndType determines the cookie's domain and hostOnly attribute.
func (j *Jar) domainAndType(host, domain string) (string, bool, error) {
if domain == "" {
// No domain attribute in the SetCookie header indicates a
// host cookie.
return host, true, nil
}
if isIP(host) {
// According to RFC 6265 domain-matching includes not being
// an IP address.
// TODO: This might be relaxed as in common browsers.
return "", false, errNoHostname
}
// From here on: If the cookie is valid, it is a domain cookie (with
// the one exception of a public suffix below).
// See RFC 6265 section 5.2.3.
if domain[0] == '.' {
domain = domain[1:]
}
if len(domain) == 0 || domain[0] == '.' {
// Received either "Domain=." or "Domain=..some.thing",
// both are illegal.
return "", false, errMalformedDomain
}
domain = strings.ToLower(domain)
if domain[len(domain)-1] == '.' {
// We received stuff like "Domain=www.example.com.".
// Browsers do handle such stuff (actually differently) but
// RFC 6265 seems to be clear here (e.g. section 4.1.2.3) in
// requiring a reject. 4.1.2.3 is not normative, but
// "Domain Matching" (5.1.3) and "Canonicalized Host Names"
// (5.1.2) are.
return "", false, errMalformedDomain
}
// See RFC 6265 section 5.3 #5.
if j.psList != nil {
if ps := j.psList.PublicSuffix(domain); ps != "" && !hasDotSuffix(domain, ps) {
if host == domain {
// This is the one exception in which a cookie
// with a domain attribute is a host cookie.
return host, true, nil
}
return "", false, errIllegalDomain
}
}
// The domain must domain-match host: www.mycompany.com cannot
// set cookies for .ourcompetitors.com.
if host != domain && !hasDotSuffix(host, domain) {
return "", false, errIllegalDomain
}
return domain, false, nil
}
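
For context, a minimal sketch of how this jar is meant to be wired into a client. It assumes the current golang.org/x/net/publicsuffix import path (the successor of the code.google.com path named in the PublicSuffixList comment above) and an arbitrary example URL:

```
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// publicsuffix.List satisfies the PublicSuffixList interface defined above.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Jar: jar}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Cookies stored from the response are filtered and ordered per RFC 6265.
	fmt.Println(jar.Cookies(resp.Request.URL))
}
```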

File diff suppressed because it is too large


@ -1,159 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cookiejar
// This file implements the Punycode algorithm from RFC 3492.
import (
"fmt"
"strings"
"unicode/utf8"
)
// These parameter values are specified in section 5.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
base int32 = 36
damp int32 = 700
initialBias int32 = 72
initialN int32 = 128
skew int32 = 38
tmax int32 = 26
tmin int32 = 1
)
// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
copy(output, prefix)
delta, n, bias := int32(0), initialN, initialBias
b, remaining := int32(0), int32(0)
for _, r := range s {
if r < 0x80 {
b++
output = append(output, byte(r))
} else {
remaining++
}
}
h := b
if b > 0 {
output = append(output, '-')
}
for remaining != 0 {
m := int32(0x7fffffff)
for _, r := range s {
if m > r && r >= n {
m = r
}
}
delta += (m - n) * (h + 1)
if delta < 0 {
return "", fmt.Errorf("cookiejar: invalid label %q", s)
}
n = m
for _, r := range s {
if r < n {
delta++
if delta < 0 {
return "", fmt.Errorf("cookiejar: invalid label %q", s)
}
continue
}
if r > n {
continue
}
q := delta
for k := base; ; k += base {
t := k - bias
if t < tmin {
t = tmin
} else if t > tmax {
t = tmax
}
if q < t {
break
}
output = append(output, encodeDigit(t+(q-t)%(base-t)))
q = (q - t) / (base - t)
}
output = append(output, encodeDigit(q))
bias = adapt(delta, h+1, h == b)
delta = 0
h++
remaining--
}
delta++
n++
}
return string(output), nil
}
func encodeDigit(digit int32) byte {
switch {
case 0 <= digit && digit < 26:
return byte(digit + 'a')
case 26 <= digit && digit < 36:
return byte(digit + ('0' - 26))
}
panic("cookiejar: internal error in punycode encoding")
}
// adapt is the bias adaptation function specified in section 6.1.
func adapt(delta, numPoints int32, firstTime bool) int32 {
if firstTime {
delta /= damp
} else {
delta /= 2
}
delta += delta / numPoints
k := int32(0)
for delta > ((base-tmin)*tmax)/2 {
delta /= base - tmin
k += base
}
return k + (base-tmin+1)*delta/(delta+skew)
}
// Strictly speaking, the remaining code below deals with IDNA (RFC 5890 and
// friends) and not Punycode (RFC 3492) per se.
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
// toASCII converts a domain or domain label to its ASCII form. For example,
// toASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// toASCII("golang") is "golang".
func toASCII(s string) (string, error) {
if ascii(s) {
return s, nil
}
labels := strings.Split(s, ".")
for i, label := range labels {
if !ascii(label) {
a, err := encode(acePrefix, label)
if err != nil {
return "", err
}
labels[i] = a
}
}
return strings.Join(labels, "."), nil
}
func ascii(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}
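
toASCII above is unexported; the same ACE ("xn--") conversion is available from the golang.org/x/net/idna package. A short sketch, assuming that package rather than anything in this vendored tree:

```
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// Equivalent to toASCII("bücher.example.com") in the file above.
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com
}
```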


@ -1,161 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cookiejar
import (
"testing"
)
var punycodeTestCases = [...]struct {
s, encoded string
}{
{"", ""},
{"-", "--"},
{"-a", "-a-"},
{"-a-", "-a--"},
{"a", "a-"},
{"a-", "a--"},
{"a-b", "a-b-"},
{"books", "books-"},
{"bücher", "bcher-kva"},
{"Hello世界", "Hello-ck1hg65u"},
{"ü", "tda"},
{"üý", "tdac"},
// The test cases below come from RFC 3492 section 7.1 with Errata 3026.
{
// (A) Arabic (Egyptian).
"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn",
},
{
// (B) Chinese (simplified).
"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye",
},
{
// (C) Chinese (traditional).
"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb",
},
{
// (D) Czech.
"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a",
},
{
// (E) Hebrew.
"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" +
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" +
"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b",
},
{
// (F) Hindi (Devanagari).
"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" +
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" +
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" +
"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd",
},
{
// (G) Japanese (kanji and hiragana).
"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" +
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa",
},
{
// (H) Korean (Hangul syllables).
"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" +
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" +
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" +
"psd879ccm6fea98c",
},
{
// (I) Russian (Cyrillic).
"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" +
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" +
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" +
"\u0438",
"b1abfaaepdrnnbgefbadotcwatmq2g4l",
},
{
// (J) Spanish.
"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" +
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" +
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" +
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" +
"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a",
},
{
// (K) Vietnamese.
"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" +
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" +
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" +
"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g",
},
{
// (L) 3<nen>B<gumi><kinpachi><sensei>.
"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b",
},
{
// (M) <amuro><namie>-with-SUPER-MONKEYS.
"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" +
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" +
"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n",
},
{
// (N) Hello-Another-Way-<sorezore><no><basho>.
"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" +
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" +
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b",
},
{
// (O) <hitotsu><yane><no><shita>2.
"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v",
},
{
// (P) Maji<de>Koi<suru>5<byou><mae>
"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" +
"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e",
},
{
// (Q) <pafii>de<runba>
"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d",
},
{
// (R) <sono><supiido><de>
"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp",
},
{
// (S) -> $1.00 <-
"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" +
"\u003C\u002D",
"-> $1.00 <--",
},
}
func TestPunycode(t *testing.T) {
for _, tc := range punycodeTestCases {
if got, err := encode("", tc.s); err != nil {
t.Errorf(`encode("", %q): %v`, tc.s, err)
} else if got != tc.encoded {
t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded)
}
}
}


@ -1,88 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
)
func ExampleHijacker() {
http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) {
hj, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
return
}
conn, bufrw, err := hj.Hijack()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Don't forget to close the connection:
defer conn.Close()
bufrw.WriteString("Now we're speaking raw TCP. Say hi: ")
bufrw.Flush()
s, err := bufrw.ReadString('\n')
if err != nil {
log.Printf("error reading string: %v", err)
return
}
fmt.Fprintf(bufrw, "You said: %q\nBye.\n", s)
bufrw.Flush()
})
}
func ExampleGet() {
res, err := http.Get("http://www.google.com/robots.txt")
if err != nil {
log.Fatal(err)
}
robots, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s", robots)
}
func ExampleFileServer() {
// Simple static webserver:
log.Fatal(http.ListenAndServe(":8080", http.FileServer(http.Dir("/usr/share/doc"))))
}
func ExampleFileServer_stripPrefix() {
// To serve a directory on disk (/tmp) under an alternate URL
// path (/tmpfiles/), use StripPrefix to modify the request
// URL's path before the FileServer sees it:
http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))
}
func ExampleStripPrefix() {
// To serve a directory on disk (/tmp) under an alternate URL
// path (/tmpfiles/), use StripPrefix to modify the request
// URL's path before the FileServer sees it:
http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))
}
type apiHandler struct{}
func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {}
func ExampleServeMux_Handle() {
mux := http.NewServeMux()
mux.Handle("/api/", apiHandler{})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
// The "/" pattern matches everything, so we need to check
// that we're at the root here.
if req.URL.Path != "/" {
http.NotFound(w, req)
return
}
fmt.Fprintf(w, "Welcome to the home page!")
})
}


@ -1,72 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Bridge package to expose http internals to tests in the http_test
// package.
package http
import (
"net"
"time"
)
func NewLoggingConn(baseName string, c net.Conn) net.Conn {
return newLoggingConn(baseName, c)
}
var ExportAppendTime = appendTime
func (t *Transport) NumPendingRequestsForTesting() int {
t.reqMu.Lock()
defer t.reqMu.Unlock()
return len(t.reqCanceler)
}
func (t *Transport) IdleConnKeysForTesting() (keys []string) {
keys = make([]string, 0)
t.idleMu.Lock()
defer t.idleMu.Unlock()
if t.idleConn == nil {
return
}
for key := range t.idleConn {
keys = append(keys, key.String())
}
return
}
func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
t.idleMu.Lock()
defer t.idleMu.Unlock()
if t.idleConn == nil {
return 0
}
for k, conns := range t.idleConn {
if k.String() == cacheKey {
return len(conns)
}
}
return 0
}
func (t *Transport) IdleConnChMapSizeForTesting() int {
t.idleMu.Lock()
defer t.idleMu.Unlock()
return len(t.idleConnCh)
}
func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler {
f := func() <-chan time.Time {
return ch
}
return &timeoutHandler{handler, f, ""}
}
func ResetCachedEnvironment() {
httpProxyEnv.reset()
noProxyEnv.reset()
}
var DefaultUserAgent = defaultUserAgent


@ -1,305 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fcgi
// This file implements FastCGI from the perspective of a child process.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cgi"
"os"
"strings"
"sync"
"time"
)
// request holds the state for an in-progress request. As soon as it's complete,
// it's converted to an http.Request.
type request struct {
pw *io.PipeWriter
reqId uint16
params map[string]string
buf [1024]byte
rawParams []byte
keepConn bool
}
func newRequest(reqId uint16, flags uint8) *request {
r := &request{
reqId: reqId,
params: map[string]string{},
keepConn: flags&flagKeepConn != 0,
}
r.rawParams = r.buf[:0]
return r
}
// parseParams reads an encoded []byte into Params.
func (r *request) parseParams() {
text := r.rawParams
r.rawParams = nil
for len(text) > 0 {
keyLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
valLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
key := readString(text, keyLen)
text = text[keyLen:]
val := readString(text, valLen)
text = text[valLen:]
r.params[key] = val
}
}
// response implements http.ResponseWriter.
type response struct {
req *request
header http.Header
w *bufWriter
wroteHeader bool
}
func newResponse(c *child, req *request) *response {
return &response{
req: req,
header: http.Header{},
w: newWriter(c.conn, typeStdout, req.reqId),
}
}
func (r *response) Header() http.Header {
return r.header
}
func (r *response) Write(data []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
return r.w.Write(data)
}
func (r *response) WriteHeader(code int) {
if r.wroteHeader {
return
}
r.wroteHeader = true
if code == http.StatusNotModified {
// Must not have body.
r.header.Del("Content-Type")
r.header.Del("Content-Length")
r.header.Del("Transfer-Encoding")
} else if r.header.Get("Content-Type") == "" {
r.header.Set("Content-Type", "text/html; charset=utf-8")
}
if r.header.Get("Date") == "" {
r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
}
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
r.header.Write(r.w)
r.w.WriteString("\r\n")
}
func (r *response) Flush() {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
r.w.Flush()
}
func (r *response) Close() error {
r.Flush()
return r.w.Close()
}
type child struct {
conn *conn
handler http.Handler
mu sync.Mutex // protects requests:
requests map[uint16]*request // keyed by request ID
}
func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
return &child{
conn: newConn(rwc),
handler: handler,
requests: make(map[uint16]*request),
}
}
func (c *child) serve() {
defer c.conn.Close()
var rec record
for {
if err := rec.read(c.conn.rwc); err != nil {
return
}
if err := c.handleRecord(&rec); err != nil {
return
}
}
}
var errCloseConn = errors.New("fcgi: connection should be closed")
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
func (c *child) handleRecord(rec *record) error {
c.mu.Lock()
req, ok := c.requests[rec.h.Id]
c.mu.Unlock()
if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
// The spec says to ignore unknown request IDs.
return nil
}
switch rec.h.Type {
case typeBeginRequest:
if req != nil {
// The server is trying to begin a request with the same ID
// as an in-progress request. This is an error.
return errors.New("fcgi: received ID that is already in-flight")
}
var br beginRequest
if err := br.read(rec.content()); err != nil {
return err
}
if br.role != roleResponder {
c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
return nil
}
req = newRequest(rec.h.Id, br.flags)
c.mu.Lock()
c.requests[rec.h.Id] = req
c.mu.Unlock()
return nil
case typeParams:
// NOTE(eds): Technically a key-value pair can straddle the boundary
// between two packets. We buffer until we've received all parameters.
if len(rec.content()) > 0 {
req.rawParams = append(req.rawParams, rec.content()...)
return nil
}
req.parseParams()
return nil
case typeStdin:
content := rec.content()
if req.pw == nil {
var body io.ReadCloser
if len(content) > 0 {
// body could be an io.LimitReader, but it shouldn't matter
// as long as both sides are behaving.
body, req.pw = io.Pipe()
} else {
body = emptyBody
}
go c.serveRequest(req, body)
}
if len(content) > 0 {
// TODO(eds): This blocks until the handler reads from the pipe.
// If the handler takes a long time, it might be a problem.
req.pw.Write(content)
} else if req.pw != nil {
req.pw.Close()
}
return nil
case typeGetValues:
values := map[string]string{"FCGI_MPXS_CONNS": "1"}
c.conn.writePairs(typeGetValuesResult, 0, values)
return nil
case typeData:
// If the filter role is implemented, read the data stream here.
return nil
case typeAbortRequest:
println("abort")
c.mu.Lock()
delete(c.requests, rec.h.Id)
c.mu.Unlock()
c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
if !req.keepConn {
// connection will close upon return
return errCloseConn
}
return nil
default:
b := make([]byte, 8)
b[0] = byte(rec.h.Type)
c.conn.writeRecord(typeUnknownType, 0, b)
return nil
}
}
func (c *child) serveRequest(req *request, body io.ReadCloser) {
r := newResponse(c, req)
httpReq, err := cgi.RequestFromMap(req.params)
if err != nil {
// there was an error reading the request
r.WriteHeader(http.StatusInternalServerError)
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
} else {
httpReq.Body = body
c.handler.ServeHTTP(r, httpReq)
}
r.Close()
c.mu.Lock()
delete(c.requests, req.reqId)
c.mu.Unlock()
c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
// Consume the entire body, so the host isn't still writing to
// us when we close the socket below in the !keepConn case,
// otherwise we'd send a RST. (golang.org/issue/4183)
// TODO(bradfitz): also bound this copy in time. Or send
// some sort of abort request to the host, so the host
// can properly cut off the client sending all the data.
// For now just bound it a little.
io.CopyN(ioutil.Discard, body, 100<<20)
body.Close()
if !req.keepConn {
c.conn.Close()
}
}
// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
// If l is nil, Serve accepts connections from os.Stdin.
// If handler is nil, http.DefaultServeMux is used.
func Serve(l net.Listener, handler http.Handler) error {
if l == nil {
var err error
l, err = net.FileListener(os.Stdin)
if err != nil {
return err
}
defer l.Close()
}
if handler == nil {
handler = http.DefaultServeMux
}
for {
rw, err := l.Accept()
if err != nil {
return err
}
c := newChild(rw, handler)
go c.serve()
}
}
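
A minimal responder wired through the standard library's copy of this package (net/http/fcgi). The listener address is an arbitrary example; passing a nil listener would serve on os.Stdin, as documented above:

```
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/http/fcgi"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:9000") // example address
	if err != nil {
		log.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from FastCGI")
	})
	// Serve reads requests from each accepted connection and replies via handler.
	log.Fatal(fcgi.Serve(l, handler))
}
```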


@ -1,274 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fcgi implements the FastCGI protocol.
// Currently only the responder role is supported.
// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
package fcgi
// This file defines the raw protocol and some utilities used by the child and
// the host.
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"io"
"sync"
)
// recType is a record type, as defined by
// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8
type recType uint8
const (
typeBeginRequest recType = 1
typeAbortRequest recType = 2
typeEndRequest recType = 3
typeParams recType = 4
typeStdin recType = 5
typeStdout recType = 6
typeStderr recType = 7
typeData recType = 8
typeGetValues recType = 9
typeGetValuesResult recType = 10
typeUnknownType recType = 11
)
// keep the connection between web-server and responder open after request
const flagKeepConn = 1
const (
maxWrite = 65535 // maximum record body
maxPad = 255
)
const (
roleResponder = iota + 1 // only Responders are implemented.
roleAuthorizer
roleFilter
)
const (
statusRequestComplete = iota
statusCantMultiplex
statusOverloaded
statusUnknownRole
)
const headerLen = 8
type header struct {
Version uint8
Type recType
Id uint16
ContentLength uint16
PaddingLength uint8
Reserved uint8
}
type beginRequest struct {
role uint16
flags uint8
reserved [5]uint8
}
func (br *beginRequest) read(content []byte) error {
if len(content) != 8 {
return errors.New("fcgi: invalid begin request record")
}
br.role = binary.BigEndian.Uint16(content)
br.flags = content[2]
return nil
}
// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte
func (h *header) init(recType recType, reqId uint16, contentLength int) {
h.Version = 1
h.Type = recType
h.Id = reqId
h.ContentLength = uint16(contentLength)
h.PaddingLength = uint8(-contentLength & 7)
}
// conn sends records over rwc
type conn struct {
mutex sync.Mutex
rwc io.ReadWriteCloser
// to avoid allocations
buf bytes.Buffer
h header
}
func newConn(rwc io.ReadWriteCloser) *conn {
return &conn{rwc: rwc}
}
func (c *conn) Close() error {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.rwc.Close()
}
type record struct {
h header
buf [maxWrite + maxPad]byte
}
func (rec *record) read(r io.Reader) (err error) {
if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
return err
}
if rec.h.Version != 1 {
return errors.New("fcgi: invalid header version")
}
n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
return err
}
return nil
}
func (r *record) content() []byte {
return r.buf[:r.h.ContentLength]
}
// writeRecord writes and sends a single record.
func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
c.mutex.Lock()
defer c.mutex.Unlock()
c.buf.Reset()
c.h.init(recType, reqId, len(b))
if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
return err
}
if _, err := c.buf.Write(b); err != nil {
return err
}
if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
return err
}
_, err := c.rwc.Write(c.buf.Bytes())
return err
}
func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
b := [8]byte{byte(role >> 8), byte(role), flags}
return c.writeRecord(typeBeginRequest, reqId, b[:])
}
func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
b := make([]byte, 8)
binary.BigEndian.PutUint32(b, uint32(appStatus))
b[4] = protocolStatus
return c.writeRecord(typeEndRequest, reqId, b)
}
func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error {
w := newWriter(c, recType, reqId)
b := make([]byte, 8)
for k, v := range pairs {
n := encodeSize(b, uint32(len(k)))
n += encodeSize(b[n:], uint32(len(v)))
if _, err := w.Write(b[:n]); err != nil {
return err
}
if _, err := w.WriteString(k); err != nil {
return err
}
if _, err := w.WriteString(v); err != nil {
return err
}
}
w.Close()
return nil
}
func readSize(s []byte) (uint32, int) {
if len(s) == 0 {
return 0, 0
}
size, n := uint32(s[0]), 1
if size&(1<<7) != 0 {
if len(s) < 4 {
return 0, 0
}
n = 4
size = binary.BigEndian.Uint32(s)
size &^= 1 << 31
}
return size, n
}
func readString(s []byte, size uint32) string {
if size > uint32(len(s)) {
return ""
}
return string(s[:size])
}
func encodeSize(b []byte, size uint32) int {
if size > 127 {
size |= 1 << 31
binary.BigEndian.PutUint32(b, size)
return 4
}
b[0] = byte(size)
return 1
}
// bufWriter encapsulates bufio.Writer but also closes the underlying stream when
// Closed.
type bufWriter struct {
closer io.Closer
*bufio.Writer
}
func (w *bufWriter) Close() error {
if err := w.Writer.Flush(); err != nil {
w.closer.Close()
return err
}
return w.closer.Close()
}
func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
s := &streamWriter{c: c, recType: recType, reqId: reqId}
w := bufio.NewWriterSize(s, maxWrite)
return &bufWriter{s, w}
}
// streamWriter abstracts out the separation of a stream into discrete records.
// It only writes maxWrite bytes at a time.
type streamWriter struct {
c *conn
recType recType
reqId uint16
}
func (w *streamWriter) Write(p []byte) (int, error) {
nn := 0
for len(p) > 0 {
n := len(p)
if n > maxWrite {
n = maxWrite
}
if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
return nn, err
}
nn += n
p = p[n:]
}
return nn, nil
}
func (w *streamWriter) Close() error {
// send empty record to close the stream
return w.c.writeRecord(w.recType, w.reqId, nil)
}
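
A standalone illustration of the name-value length encoding used by writePairs and readSize above: lengths up to 127 take one byte, longer lengths take four bytes with the high bit of the first byte set. encodeSize is lifted verbatim from this file; everything else is scaffolding.

```
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeSize is copied from the FastCGI protocol file above.
func encodeSize(b []byte, size uint32) int {
	if size > 127 {
		size |= 1 << 31
		binary.BigEndian.PutUint32(b, size)
		return 4
	}
	b[0] = byte(size)
	return 1
}

func main() {
	b := make([]byte, 4)
	for _, n := range []uint32{0, 127, 128, 1000} {
		fmt.Printf("%7d -> % x\n", n, b[:encodeSize(b, n)])
	}
}
```

The resulting byte patterns match the sizeTests table in the test file that follows.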


@ -1,150 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fcgi
import (
"bytes"
"errors"
"io"
"testing"
)
var sizeTests = []struct {
size uint32
bytes []byte
}{
{0, []byte{0x00}},
{127, []byte{0x7F}},
{128, []byte{0x80, 0x00, 0x00, 0x80}},
{1000, []byte{0x80, 0x00, 0x03, 0xE8}},
{33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
}
func TestSize(t *testing.T) {
b := make([]byte, 4)
for i, test := range sizeTests {
n := encodeSize(b, test.size)
if !bytes.Equal(b[:n], test.bytes) {
t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
}
size, n := readSize(test.bytes)
if size != test.size {
t.Errorf("%d expected %d, read %d", i, test.size, size)
}
if len(test.bytes) != n {
t.Errorf("%d did not consume all the bytes", i)
}
}
}
var streamTests = []struct {
desc string
recType recType
reqId uint16
content []byte
raw []byte
}{
{"single record", typeStdout, 1, nil,
[]byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0},
},
// this data will have to be split into two records
{"two records", typeStdin, 300, make([]byte, 66000),
bytes.Join([][]byte{
// header for the first record
{1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
make([]byte, 65536),
// header for the second
{1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0},
make([]byte, 472),
// header for the empty record
{1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0},
},
nil),
},
}
type nilCloser struct {
io.ReadWriter
}
func (c *nilCloser) Close() error { return nil }
func TestStreams(t *testing.T) {
var rec record
outer:
for _, test := range streamTests {
buf := bytes.NewBuffer(test.raw)
var content []byte
for buf.Len() > 0 {
if err := rec.read(buf); err != nil {
t.Errorf("%s: error reading record: %v", test.desc, err)
continue outer
}
content = append(content, rec.content()...)
}
if rec.h.Type != test.recType {
t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
continue
}
if rec.h.Id != test.reqId {
t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
continue
}
if !bytes.Equal(content, test.content) {
t.Errorf("%s: read wrong content", test.desc)
continue
}
buf.Reset()
c := newConn(&nilCloser{buf})
w := newWriter(c, test.recType, test.reqId)
if _, err := w.Write(test.content); err != nil {
t.Errorf("%s: error writing record: %v", test.desc, err)
continue
}
if err := w.Close(); err != nil {
t.Errorf("%s: error closing stream: %v", test.desc, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.raw) {
t.Errorf("%s: wrote wrong content", test.desc)
}
}
}
type writeOnlyConn struct {
buf []byte
}
func (c *writeOnlyConn) Write(p []byte) (int, error) {
c.buf = append(c.buf, p...)
return len(p), nil
}
func (c *writeOnlyConn) Read(p []byte) (int, error) {
return 0, errors.New("conn is write-only")
}
func (c *writeOnlyConn) Close() error {
return nil
}
func TestGetValues(t *testing.T) {
var rec record
rec.h.Type = typeGetValues
wc := new(writeOnlyConn)
c := newChild(wc, nil)
err := c.handleRecord(&rec)
if err != nil {
t.Fatalf("handleRecord: %v", err)
}
const want = "\x01\n\x00\x00\x00\x12\x06\x00" +
"\x0f\x01FCGI_MPXS_CONNS1" +
"\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00"
if got := string(wc.buf); got != want {
t.Errorf(" got: %q\nwant: %q\n", got, want)
}
}


@ -1,65 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func checker(t *testing.T) func(string, error) {
return func(call string, err error) {
if err == nil {
return
}
t.Fatalf("%s: %v", call, err)
}
}
func TestFileTransport(t *testing.T) {
check := checker(t)
dname, err := ioutil.TempDir("", "")
check("TempDir", err)
fname := filepath.Join(dname, "foo.txt")
err = ioutil.WriteFile(fname, []byte("Bar"), 0644)
check("WriteFile", err)
defer os.Remove(dname)
defer os.Remove(fname)
tr := &Transport{}
tr.RegisterProtocol("file", NewFileTransport(Dir(dname)))
c := &Client{Transport: tr}
fooURLs := []string{"file:///foo.txt", "file://../foo.txt"}
for _, urlstr := range fooURLs {
res, err := c.Get(urlstr)
check("Get "+urlstr, err)
if res.StatusCode != 200 {
t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
}
if res.ContentLength != -1 {
t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
}
if res.Body == nil {
t.Fatalf("for %s, nil Body", urlstr)
}
slurp, err := ioutil.ReadAll(res.Body)
check("ReadAll "+urlstr, err)
if string(slurp) != "Bar" {
t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
}
}
const badURL = "file://../no-exist.txt"
res, err := c.Get(badURL)
check("Get "+badURL, err)
if res.StatusCode != 404 {
t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
}
res.Body.Close()
}
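
The pattern tested above is available directly from net/http: register a "file" protocol on a Transport so file:// URLs resolve against a directory. A sketch with a hypothetical directory and file name:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	tr := &http.Transport{}
	tr.RegisterProtocol("file", http.NewFileTransport(http.Dir("/usr/share/doc"))) // hypothetical dir
	c := &http.Client{Transport: tr}

	res, err := c.Get("file:///index.html") // hypothetical file under the dir above
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	b, _ := ioutil.ReadAll(res.Body)
	fmt.Printf("status %s, %d bytes\n", res.Status, len(b))
}
```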


@ -1,858 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var itoa = strconv.Itoa
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-" + itoa(testFileLen-2), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-" + itoa(testFileLen-1), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-" + itoa(testFileLen), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
}
func TestServeFile(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">&#34;&#39;&lt;&gt;&amp;</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo">&lt;combo&gt;?foo</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
func TestServeFileWithContentEncoding(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
}
return fis, nil
}
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
}
}
}
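// A minimal standalone sketch (names, path, payload and modtime are invented
// for illustration) of the server-side usage the table above exercises: a
// handler sets an ETag and hands the entity to http.ServeContent, which then
// answers If-Modified-Since, If-None-Match, Range and If-Range on its own.
package main

import (
	"bytes"
	"net/http"
	"time"
)

func main() {
	css := []byte("body { color: red }\n")
	modtime := time.Date(2016, 2, 24, 0, 0, 0, 0, time.UTC)
	http.HandleFunc("/style.css", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("ETag", `"v1"`)
		// bytes.Reader is an io.ReadSeeker, which ServeContent needs so it can
		// serve byte ranges (206 Partial Content) as well as the whole body.
		http.ServeContent(w, r, "style.css", modtime, bytes.NewReader(css))
	})
	http.ListenAndServe("127.0.0.1:8080", nil)
}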
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
var buf bytes.Buffer
child := exec.Command("strace", "-f", "-q", "-e", "trace=sendfile,sendfile64", os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
out := buf.String()
if !rx.MatchString(out) && !rxResume.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
r, err := DefaultClient.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
type panicOnSeek struct{ io.ReadSeeker }


@ -1,212 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"runtime"
"testing"
"time"
)
var headerWriteTests = []struct {
h Header
exclude map[string]bool
expected string
}{
{Header{}, nil, ""},
{
Header{
"Content-Type": {"text/html; charset=UTF-8"},
"Content-Length": {"0"},
},
nil,
"Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
},
{
Header{
"Content-Length": {"0", "1", "2"},
},
nil,
"Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
},
{
Header{
"Expires": {"-1"},
"Content-Length": {"0"},
"Content-Encoding": {"gzip"},
},
map[string]bool{"Content-Length": true},
"Content-Encoding: gzip\r\nExpires: -1\r\n",
},
{
Header{
"Expires": {"-1"},
"Content-Length": {"0", "1", "2"},
"Content-Encoding": {"gzip"},
},
map[string]bool{"Content-Length": true},
"Content-Encoding: gzip\r\nExpires: -1\r\n",
},
{
Header{
"Expires": {"-1"},
"Content-Length": {"0"},
"Content-Encoding": {"gzip"},
},
map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
"",
},
{
Header{
"Nil": nil,
"Empty": {},
"Blank": {""},
"Double-Blank": {"", ""},
},
nil,
"Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n",
},
// Tests header sorting when over the insertion sort threshold:
{
Header{
"k1": {"1a", "1b"},
"k2": {"2a", "2b"},
"k3": {"3a", "3b"},
"k4": {"4a", "4b"},
"k5": {"5a", "5b"},
"k6": {"6a", "6b"},
"k7": {"7a", "7b"},
"k8": {"8a", "8b"},
"k9": {"9a", "9b"},
},
map[string]bool{"k5": true},
"k1: 1a\r\nk1: 1b\r\nk2: 2a\r\nk2: 2b\r\nk3: 3a\r\nk3: 3b\r\n" +
"k4: 4a\r\nk4: 4b\r\nk6: 6a\r\nk6: 6b\r\n" +
"k7: 7a\r\nk7: 7b\r\nk8: 8a\r\nk8: 8b\r\nk9: 9a\r\nk9: 9b\r\n",
},
}
func TestHeaderWrite(t *testing.T) {
var buf bytes.Buffer
for i, test := range headerWriteTests {
test.h.WriteSubset(&buf, test.exclude)
if buf.String() != test.expected {
t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
}
buf.Reset()
}
}
var parseTimeTests = []struct {
h Header
err bool
}{
{Header{"Date": {""}}, true},
{Header{"Date": {"invalid"}}, true},
{Header{"Date": {"1994-11-06T08:49:37Z00:00"}}, true},
{Header{"Date": {"Sun, 06 Nov 1994 08:49:37 GMT"}}, false},
{Header{"Date": {"Sunday, 06-Nov-94 08:49:37 GMT"}}, false},
{Header{"Date": {"Sun Nov 6 08:49:37 1994"}}, false},
}
func TestParseTime(t *testing.T) {
expect := time.Date(1994, 11, 6, 8, 49, 37, 0, time.UTC)
for i, test := range parseTimeTests {
d, err := ParseTime(test.h.Get("Date"))
if err != nil {
if !test.err {
t.Errorf("#%d:\n got err: %v", i, err)
}
continue
}
if test.err {
t.Errorf("#%d:\n should err", i)
continue
}
if !expect.Equal(d) {
t.Errorf("#%d:\n got: %v\nwant: %v", i, d, expect)
}
}
}
type hasTokenTest struct {
header string
token string
want bool
}
var hasTokenTests = []hasTokenTest{
{"", "", false},
{"", "foo", false},
{"foo", "foo", true},
{"foo ", "foo", true},
{" foo", "foo", true},
{" foo ", "foo", true},
{"foo,bar", "foo", true},
{"bar,foo", "foo", true},
{"bar, foo", "foo", true},
{"bar,foo, baz", "foo", true},
{"bar, foo,baz", "foo", true},
{"bar,foo, baz", "foo", true},
{"bar, foo, baz", "foo", true},
{"FOO", "foo", true},
{"FOO ", "foo", true},
{" FOO", "foo", true},
{" FOO ", "foo", true},
{"FOO,BAR", "foo", true},
{"BAR,FOO", "foo", true},
{"BAR, FOO", "foo", true},
{"BAR,FOO, baz", "foo", true},
{"BAR, FOO,BAZ", "foo", true},
{"BAR,FOO, BAZ", "foo", true},
{"BAR, FOO, BAZ", "foo", true},
{"foobar", "foo", false},
{"barfoo ", "foo", false},
}
func TestHasToken(t *testing.T) {
for _, tt := range hasTokenTests {
if hasToken(tt.header, tt.token) != tt.want {
t.Errorf("hasToken(%q, %q) = %v; want %v", tt.header, tt.token, !tt.want, tt.want)
}
}
}
var testHeader = Header{
"Content-Length": {"123"},
"Content-Type": {"text/plain"},
"Date": {"some date at some time Z"},
"Server": {DefaultUserAgent},
}
var buf bytes.Buffer
func BenchmarkHeaderWriteSubset(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
buf.Reset()
testHeader.WriteSubset(&buf, nil)
}
}
func TestHeaderWriteSubsetAllocs(t *testing.T) {
if testing.Short() {
t.Skip("skipping alloc test in short mode")
}
if raceEnabled {
t.Skip("skipping test under race detector")
}
if runtime.GOMAXPROCS(0) > 1 {
t.Skip("skipping; GOMAXPROCS>1")
}
n := testing.AllocsPerRun(100, func() {
buf.Reset()
testHeader.WriteSubset(&buf, nil)
})
if n > 0 {
t.Errorf("allocs = %g; want 0", n)
}
}


@ -1,50 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httptest_test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
)
func ExampleResponseRecorder() {
handler := func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "something failed", http.StatusInternalServerError)
}
req, err := http.NewRequest("GET", "http://example.com/foo", nil)
if err != nil {
log.Fatal(err)
}
w := httptest.NewRecorder()
handler(w, req)
fmt.Printf("%d - %s", w.Code, w.Body.String())
// Output: 500 - something failed
}
func ExampleServer() {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Hello, client")
}))
defer ts.Close()
res, err := http.Get(ts.URL)
if err != nil {
log.Fatal(err)
}
greeting, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s", greeting)
// Output: Hello, client
}


@ -1,72 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package httptest provides utilities for HTTP testing.
package httptest
import (
"bytes"
"net/http"
)
// ResponseRecorder is an implementation of http.ResponseWriter that
// records its mutations for later inspection in tests.
type ResponseRecorder struct {
Code int // the HTTP response code from WriteHeader
HeaderMap http.Header // the HTTP response headers
Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
Flushed bool
wroteHeader bool
}
// NewRecorder returns an initialized ResponseRecorder.
func NewRecorder() *ResponseRecorder {
return &ResponseRecorder{
HeaderMap: make(http.Header),
Body: new(bytes.Buffer),
Code: 200,
}
}
// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
const DefaultRemoteAddr = "1.2.3.4"
// Header returns the response headers.
func (rw *ResponseRecorder) Header() http.Header {
m := rw.HeaderMap
if m == nil {
m = make(http.Header)
rw.HeaderMap = m
}
return m
}
// Write always succeeds and writes to rw.Body, if not nil.
func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
if !rw.wroteHeader {
rw.WriteHeader(200)
}
if rw.Body != nil {
rw.Body.Write(buf)
}
return len(buf), nil
}
// WriteHeader sets rw.Code.
func (rw *ResponseRecorder) WriteHeader(code int) {
if !rw.wroteHeader {
rw.Code = code
}
rw.wroteHeader = true
}
// Flush sets rw.Flushed to true.
func (rw *ResponseRecorder) Flush() {
if !rw.wroteHeader {
rw.WriteHeader(200)
}
rw.Flushed = true
}
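// A small hedged sketch of using the recorder from a test in another package
// (the package name, handler and expected values are invented for
// illustration): drive a handler directly, then inspect Code, HeaderMap and
// Body.
package yourpkg_test

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestJSONHandler(t *testing.T) {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"ok":true}`)) // Write implies a 200 if WriteHeader wasn't called
	})
	req, err := http.NewRequest("GET", "http://example.com/status", nil)
	if err != nil {
		t.Fatal(err)
	}
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)
	if rec.Code != 200 {
		t.Errorf("Code = %d; want 200", rec.Code)
	}
	if ct := rec.HeaderMap.Get("Content-Type"); ct != "application/json" {
		t.Errorf("Content-Type = %q; want application/json", ct)
	}
	if rec.Body.String() != `{"ok":true}` {
		t.Errorf("Body = %q", rec.Body.String())
	}
}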


@ -1,90 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httptest
import (
"fmt"
"net/http"
"testing"
)
func TestRecorder(t *testing.T) {
type checkFunc func(*ResponseRecorder) error
check := func(fns ...checkFunc) []checkFunc { return fns }
hasStatus := func(wantCode int) checkFunc {
return func(rec *ResponseRecorder) error {
if rec.Code != wantCode {
return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode)
}
return nil
}
}
hasContents := func(want string) checkFunc {
return func(rec *ResponseRecorder) error {
if rec.Body.String() != want {
return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want)
}
return nil
}
}
hasFlush := func(want bool) checkFunc {
return func(rec *ResponseRecorder) error {
if rec.Flushed != want {
return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want)
}
return nil
}
}
tests := []struct {
name string
h func(w http.ResponseWriter, r *http.Request)
checks []checkFunc
}{
{
"200 default",
func(w http.ResponseWriter, r *http.Request) {},
check(hasStatus(200), hasContents("")),
},
{
"first code only",
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(201)
w.WriteHeader(202)
w.Write([]byte("hi"))
},
check(hasStatus(201), hasContents("hi")),
},
{
"write sends 200",
func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hi first"))
w.WriteHeader(201)
w.WriteHeader(202)
},
check(hasStatus(200), hasContents("hi first"), hasFlush(false)),
},
{
"flush",
func(w http.ResponseWriter, r *http.Request) {
w.(http.Flusher).Flush() // also sends a 200
w.WriteHeader(201)
},
check(hasStatus(200), hasFlush(true)),
},
}
r, _ := http.NewRequest("GET", "http://foo.com/", nil)
for _, tt := range tests {
h := http.HandlerFunc(tt.h)
rec := NewRecorder()
h.ServeHTTP(rec, r)
for _, check := range tt.checks {
if err := check(rec); err != nil {
t.Errorf("%s: %v", tt.name, err)
}
}
}
}


@ -1,228 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of Server
package httptest
import (
"crypto/tls"
"flag"
"fmt"
"net"
"net/http"
"os"
"sync"
)
// A Server is an HTTP server listening on a system-chosen port on the
// local loopback interface, for use in end-to-end HTTP tests.
type Server struct {
URL string // base URL of form http://ipaddr:port with no trailing slash
Listener net.Listener
// TLS is the optional TLS configuration, populated with a new config
// after TLS is started. If set on an unstarted server before StartTLS
// is called, existing fields are copied into the new config.
TLS *tls.Config
// Config may be changed after calling NewUnstartedServer and
// before Start or StartTLS.
Config *http.Server
// wg counts the number of outstanding HTTP requests on this server.
// Close blocks until all requests are finished.
wg sync.WaitGroup
}
// historyListener keeps track of all connections that it's ever
// accepted.
type historyListener struct {
net.Listener
sync.Mutex // protects history
history []net.Conn
}
func (hs *historyListener) Accept() (c net.Conn, err error) {
c, err = hs.Listener.Accept()
if err == nil {
hs.Lock()
hs.history = append(hs.history, c)
hs.Unlock()
}
return
}
func newLocalListener() net.Listener {
if *serve != "" {
l, err := net.Listen("tcp", *serve)
if err != nil {
panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err))
}
return l
}
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
}
}
return l
}
// When debugging a particular http server-based test,
// this flag lets you run
// go test -run=BrokenTest -httptest.serve=127.0.0.1:8000
// to start the broken server so you can interact with it manually.
var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks")
// NewServer starts and returns a new Server.
// The caller should call Close when finished, to shut it down.
func NewServer(handler http.Handler) *Server {
ts := NewUnstartedServer(handler)
ts.Start()
return ts
}
// NewUnstartedServer returns a new Server but doesn't start it.
//
// After changing its configuration, the caller should call Start or
// StartTLS.
//
// The caller should call Close when finished, to shut it down.
func NewUnstartedServer(handler http.Handler) *Server {
return &Server{
Listener: newLocalListener(),
Config: &http.Server{Handler: handler},
}
}
// Start starts a server from NewUnstartedServer.
func (s *Server) Start() {
if s.URL != "" {
panic("Server already started")
}
s.Listener = &historyListener{Listener: s.Listener}
s.URL = "http://" + s.Listener.Addr().String()
s.wrapHandler()
go s.Config.Serve(s.Listener)
if *serve != "" {
fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
select {}
}
}
// StartTLS starts TLS on a server from NewUnstartedServer.
func (s *Server) StartTLS() {
if s.URL != "" {
panic("Server already started")
}
cert, err := tls.X509KeyPair(localhostCert, localhostKey)
if err != nil {
panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
}
existingConfig := s.TLS
s.TLS = new(tls.Config)
if existingConfig != nil {
*s.TLS = *existingConfig
}
if s.TLS.NextProtos == nil {
s.TLS.NextProtos = []string{"http/1.1"}
}
if len(s.TLS.Certificates) == 0 {
s.TLS.Certificates = []tls.Certificate{cert}
}
tlsListener := tls.NewListener(s.Listener, s.TLS)
s.Listener = &historyListener{Listener: tlsListener}
s.URL = "https://" + s.Listener.Addr().String()
s.wrapHandler()
go s.Config.Serve(s.Listener)
}
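// A hedged sketch of the unstarted-server path above (handler contents and the
// timeout value are invented for illustration): adjust Config before starting,
// call StartTLS, and talk to it with a client that skips certificate
// verification, since the server presents the self-signed localhostCert.
package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over TLS")
	}))
	ts.Config.ReadTimeout = 2 * time.Second // tune the http.Server before it starts
	ts.StartTLS()
	defer ts.Close()

	// The test certificate is self-signed, so a plain http.Get would fail
	// verification; skip it for this loopback-only test server.
	tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
	defer tr.CloseIdleConnections()
	res, err := (&http.Client{Transport: tr}).Get(ts.URL)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Printf("%s", body)
}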
func (s *Server) wrapHandler() {
h := s.Config.Handler
if h == nil {
h = http.DefaultServeMux
}
s.Config.Handler = &waitGroupHandler{
s: s,
h: h,
}
}
// NewTLSServer starts and returns a new Server using TLS.
// The caller should call Close when finished, to shut it down.
func NewTLSServer(handler http.Handler) *Server {
ts := NewUnstartedServer(handler)
ts.StartTLS()
return ts
}
// Close shuts down the server and blocks until all outstanding
// requests on this server have completed.
func (s *Server) Close() {
s.Listener.Close()
s.wg.Wait()
s.CloseClientConnections()
if t, ok := http.DefaultTransport.(*http.Transport); ok {
t.CloseIdleConnections()
}
}
// CloseClientConnections closes any currently open HTTP connections
// to the test Server.
func (s *Server) CloseClientConnections() {
hl, ok := s.Listener.(*historyListener)
if !ok {
return
}
hl.Lock()
for _, conn := range hl.history {
conn.Close()
}
hl.Unlock()
}
// waitGroupHandler wraps a handler, incrementing and decrementing a
// sync.WaitGroup on each request, to enable Server.Close to block
// until outstanding requests are finished.
type waitGroupHandler struct {
s *Server
h http.Handler // non-nil
}
func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.s.wg.Add(1)
defer h.s.wg.Done() // a defer, in case ServeHTTP below panics
h.h.ServeHTTP(w, r)
}
// localhostCert is a PEM-encoded TLS cert with SAN IPs
// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
// of ASN.1 time).
// generated from src/pkg/crypto/tls:
// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD
bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj
bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa
IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA
AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud
EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA
AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk
Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA==
-----END CERTIFICATE-----`)
// localhostKey is the private key for localhostCert.
var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0
0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV
NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d
AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW
MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD
EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA
1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE=
-----END RSA PRIVATE KEY-----`)


@ -1,52 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httptest
import (
"io/ioutil"
"net/http"
"testing"
"time"
)
func TestServer(t *testing.T) {
ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hello"))
}))
defer ts.Close()
res, err := http.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
got, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(got) != "hello" {
t.Errorf("got %q, want hello", string(got))
}
}
func TestIssue7264(t *testing.T) {
for i := 0; i < 1000; i++ {
func() {
inHandler := make(chan bool, 1)
ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
inHandler <- true
}))
defer ts.Close()
tr := &http.Transport{
ResponseHeaderTimeout: time.Nanosecond,
}
defer tr.CloseIdleConnections()
c := &http.Client{Transport: tr}
res, err := c.Get(ts.URL)
<-inHandler
if err == nil {
res.Body.Close()
}
}()
}
}


@ -1,203 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The wire protocol for HTTP's "chunked" Transfer-Encoding.
// This code is duplicated in net/http and net/http/httputil.
// Please make any changes in both files.
package httputil
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
)
const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
var ErrLineTooLong = errors.New("header line too long")
// newChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
//
// newChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func newChunkedReader(r io.Reader) io.Reader {
br, ok := r.(*bufio.Reader)
if !ok {
br = bufio.NewReader(r)
}
return &chunkedReader{r: br}
}
type chunkedReader struct {
r *bufio.Reader
n uint64 // unread bytes in chunk
err error
buf [2]byte
}
func (cr *chunkedReader) beginChunk() {
// chunk-size CRLF
var line []byte
line, cr.err = readLine(cr.r)
if cr.err != nil {
return
}
cr.n, cr.err = parseHexUint(line)
if cr.err != nil {
return
}
if cr.n == 0 {
cr.err = io.EOF
}
}
func (cr *chunkedReader) chunkHeaderAvailable() bool {
n := cr.r.Buffered()
if n > 0 {
peek, _ := cr.r.Peek(n)
return bytes.IndexByte(peek, '\n') >= 0
}
return false
}
func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
for cr.err == nil {
if cr.n == 0 {
if n > 0 && !cr.chunkHeaderAvailable() {
// We've read enough. Don't potentially block
// reading a new chunk header.
break
}
cr.beginChunk()
continue
}
if len(b) == 0 {
break
}
rbuf := b
if uint64(len(rbuf)) > cr.n {
rbuf = rbuf[:cr.n]
}
var n0 int
n0, cr.err = cr.r.Read(rbuf)
n += n0
b = b[n0:]
cr.n -= uint64(n0)
// If we're at the end of a chunk, read the next two
// bytes to verify they are "\r\n".
if cr.n == 0 && cr.err == nil {
if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
cr.err = errors.New("malformed chunked encoding")
}
}
}
}
return n, cr.err
}
// Read a line of bytes (up to \n) from b.
// Give up if the line exceeds maxLineLength.
// The returned bytes are a pointer into storage in
// the bufio, so they are only valid until the next bufio read.
func readLine(b *bufio.Reader) (p []byte, err error) {
if p, err = b.ReadSlice('\n'); err != nil {
// We always know when EOF is coming.
// If the caller asked for a line, there should be a line.
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else if err == bufio.ErrBufferFull {
err = ErrLineTooLong
}
return nil, err
}
if len(p) >= maxLineLength {
return nil, ErrLineTooLong
}
return trimTrailingWhitespace(p), nil
}
func trimTrailingWhitespace(b []byte) []byte {
for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
b = b[:len(b)-1]
}
return b
}
func isASCIISpace(b byte) bool {
return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}
// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream.
//
// newChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using newChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// length, both of which are wrong.
func newChunkedWriter(w io.Writer) io.WriteCloser {
return &chunkedWriter{w}
}
// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
// Encoding wire format to the underlying Wire chunkedWriter.
type chunkedWriter struct {
Wire io.Writer
}
// Write the contents of data as one chunk to Wire.
// Note that the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of io.WriteString
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
// Don't send 0-length data. It looks like EOF for chunked encoding.
if len(data) == 0 {
return 0, nil
}
if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
return 0, err
}
if n, err = cw.Wire.Write(data); err != nil {
return
}
if n != len(data) {
err = io.ErrShortWrite
return
}
_, err = io.WriteString(cw.Wire, "\r\n")
return
}
func (cw *chunkedWriter) Close() error {
_, err := io.WriteString(cw.Wire, "0\r\n")
return err
}
func parseHexUint(v []byte) (n uint64, err error) {
for _, b := range v {
n <<= 4
switch {
case '0' <= b && b <= '9':
b = b - '0'
case 'a' <= b && b <= 'f':
b = b - 'a' + 10
case 'A' <= b && b <= 'F':
b = b - 'A' + 10
default:
return 0, errors.New("invalid byte in chunk length")
}
n |= uint64(b)
}
return
}


@ -1,159 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code is duplicated in net/http and net/http/httputil.
// Please make any changes in both files.
package httputil
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
)
func TestChunk(t *testing.T) {
var b bytes.Buffer
w := newChunkedWriter(&b)
const chunk1 = "hello, "
const chunk2 = "world! 0123456789abcdef"
w.Write([]byte(chunk1))
w.Write([]byte(chunk2))
w.Close()
if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
t.Fatalf("chunk writer wrote %q; want %q", g, e)
}
r := newChunkedReader(&b)
data, err := ioutil.ReadAll(r)
if err != nil {
t.Logf(`data: "%s"`, data)
t.Fatalf("ReadAll from reader: %v", err)
}
if g, e := string(data), chunk1+chunk2; g != e {
t.Errorf("chunk reader read %q; want %q", g, e)
}
}
func TestChunkReadMultiple(t *testing.T) {
// Bunch of small chunks, all read together.
{
var b bytes.Buffer
w := newChunkedWriter(&b)
w.Write([]byte("foo"))
w.Write([]byte("bar"))
w.Close()
r := newChunkedReader(&b)
buf := make([]byte, 10)
n, err := r.Read(buf)
if n != 6 || err != io.EOF {
t.Errorf("Read = %d, %v; want 6, EOF", n, err)
}
buf = buf[:n]
if string(buf) != "foobar" {
t.Errorf("Read = %q; want %q", buf, "foobar")
}
}
// One big chunk followed by a little chunk, but the small bufio.Reader size
// should prevent the second chunk header from being read.
{
var b bytes.Buffer
w := newChunkedWriter(&b)
// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
// the same as the bufio ReaderSize below (the minimum), so even
// though we're going to try to Read with a buffer large enough to also
// receive "foo", the second chunk header won't be read yet.
const fillBufChunk = "0123456789a"
const shortChunk = "foo"
w.Write([]byte(fillBufChunk))
w.Write([]byte(shortChunk))
w.Close()
r := newChunkedReader(bufio.NewReaderSize(&b, 16))
buf := make([]byte, len(fillBufChunk)+len(shortChunk))
n, err := r.Read(buf)
if n != len(fillBufChunk) || err != nil {
t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
}
buf = buf[:n]
if string(buf) != fillBufChunk {
t.Errorf("Read = %q; want %q", buf, fillBufChunk)
}
n, err = r.Read(buf)
if n != len(shortChunk) || err != io.EOF {
t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
}
}
// And test that we see an EOF chunk, even though our buffer is already full:
{
r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
buf := make([]byte, 3)
n, err := r.Read(buf)
if n != 3 || err != io.EOF {
t.Errorf("Read = %d, %v; want 3, EOF", n, err)
}
if string(buf) != "foo" {
t.Errorf("buf = %q; want foo", buf)
}
}
}
func TestChunkReaderAllocs(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
var buf bytes.Buffer
w := newChunkedWriter(&buf)
a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
w.Write(a)
w.Write(b)
w.Write(c)
w.Close()
readBuf := make([]byte, len(a)+len(b)+len(c)+1)
byter := bytes.NewReader(buf.Bytes())
bufr := bufio.NewReader(byter)
mallocs := testing.AllocsPerRun(100, func() {
byter.Seek(0, 0)
bufr.Reset(byter)
r := newChunkedReader(bufr)
n, err := io.ReadFull(r, readBuf)
if n != len(readBuf)-1 {
t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
}
if err != io.ErrUnexpectedEOF {
t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
}
})
if mallocs > 1.5 {
t.Errorf("mallocs = %v; want 1", mallocs)
}
}
func TestParseHexUint(t *testing.T) {
for i := uint64(0); i <= 1234; i++ {
line := []byte(fmt.Sprintf("%x", i))
got, err := parseHexUint(line)
if err != nil {
t.Fatalf("on %d: %v", i, err)
}
if got != i {
t.Errorf("for input %q = %d; want %d", line, got, i)
}
}
_, err := parseHexUint([]byte("bogus"))
if err == nil {
t.Error("expected error on bogus input")
}
}


@ -1,276 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httputil
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// One of the copies, say from b to r2, could be avoided by using a more
// elaborate trick where the other copy is made during Request/Response.Write.
// This would complicate things too much, given that these functions are for
// debugging only.
func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
var buf bytes.Buffer
if _, err = buf.ReadFrom(b); err != nil {
return nil, nil, err
}
if err = b.Close(); err != nil {
return nil, nil, err
}
return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil
}
// dumpConn is a net.Conn which writes to Writer and reads from Reader
type dumpConn struct {
io.Writer
io.Reader
}
func (c *dumpConn) Close() error { return nil }
func (c *dumpConn) LocalAddr() net.Addr { return nil }
func (c *dumpConn) RemoteAddr() net.Addr { return nil }
func (c *dumpConn) SetDeadline(t time.Time) error { return nil }
func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil }
func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
type neverEnding byte
func (b neverEnding) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = byte(b)
}
return len(p), nil
}
// DumpRequestOut is like DumpRequest but includes
// headers that the standard http.Transport adds,
// such as User-Agent.
func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
save := req.Body
dummyBody := false
if !body || req.Body == nil {
req.Body = nil
if req.ContentLength != 0 {
req.Body = ioutil.NopCloser(io.LimitReader(neverEnding('x'), req.ContentLength))
dummyBody = true
}
} else {
var err error
save, req.Body, err = drainBody(req.Body)
if err != nil {
return nil, err
}
}
// Since we're using the actual Transport code to write the request,
// switch to http so the Transport doesn't try to do an SSL
// negotiation with our dumpConn and its bytes.Buffer & pipe.
// The wire format for https and http are the same, anyway.
reqSend := req
if req.URL.Scheme == "https" {
reqSend = new(http.Request)
*reqSend = *req
reqSend.URL = new(url.URL)
*reqSend.URL = *req.URL
reqSend.URL.Scheme = "http"
}
// Use the actual Transport code to record what we would send
// on the wire, but not using TCP. Use a Transport with a
// custom dialer that returns a fake net.Conn that waits
// for the full input (and recording it), and then responds
// with a dummy response.
var buf bytes.Buffer // records the output
pr, pw := io.Pipe()
dr := &delegateReader{c: make(chan io.Reader)}
// Wait for the request before replying with a dummy response:
go func() {
http.ReadRequest(bufio.NewReader(pr))
dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
}()
t := &http.Transport{
Dial: func(net, addr string) (net.Conn, error) {
return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
},
}
defer t.CloseIdleConnections()
_, err := t.RoundTrip(reqSend)
req.Body = save
if err != nil {
return nil, err
}
dump := buf.Bytes()
// If we used a dummy body above, remove it now.
// TODO: if the req.ContentLength is large, we allocate memory
// unnecessarily just to slice it off here. But this is just
// a debug function, so this is acceptable for now. We could
// discard the body earlier if this matters.
if dummyBody {
if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 {
dump = dump[:i+4]
}
}
return dump, nil
}
// delegateReader is a reader that delegates to another reader,
// once it arrives on a channel.
type delegateReader struct {
c chan io.Reader
r io.Reader // nil until received from c
}
func (r *delegateReader) Read(p []byte) (int, error) {
if r.r == nil {
r.r = <-r.c
}
return r.r.Read(p)
}
// Return value if nonempty, def otherwise.
func valueOrDefault(value, def string) string {
if value != "" {
return value
}
return def
}
var reqWriteExcludeHeaderDump = map[string]bool{
"Host": true, // not in Header map anyway
"Content-Length": true,
"Transfer-Encoding": true,
"Trailer": true,
}
// dumpAsReceived writes req to w in the form as it was received, or
// at least as accurately as possible from the information retained in
// the request.
func dumpAsReceived(req *http.Request, w io.Writer) error {
return nil
}
// DumpRequest returns the as-received wire representation of req,
// optionally including the request body, for debugging.
// DumpRequest is semantically a no-op, but in order to
// dump the body, it reads the body data into memory and
// changes req.Body to refer to the in-memory copy.
// The documentation for http.Request.Write details which fields
// of req are used.
func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
save := req.Body
if !body || req.Body == nil {
req.Body = nil
} else {
save, req.Body, err = drainBody(req.Body)
if err != nil {
return
}
}
var b bytes.Buffer
fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)
host := req.Host
if host == "" && req.URL != nil {
host = req.URL.Host
}
if host != "" {
fmt.Fprintf(&b, "Host: %s\r\n", host)
}
chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
if len(req.TransferEncoding) > 0 {
fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
}
if req.Close {
fmt.Fprintf(&b, "Connection: close\r\n")
}
err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
if err != nil {
return
}
io.WriteString(&b, "\r\n")
if req.Body != nil {
var dest io.Writer = &b
if chunked {
dest = NewChunkedWriter(dest)
}
_, err = io.Copy(dest, req.Body)
if chunked {
dest.(io.Closer).Close()
io.WriteString(&b, "\r\n")
}
}
req.Body = save
if err != nil {
return
}
dump = b.Bytes()
return
}
// errNoBody is a sentinel error value used by failureToReadBody so we can detect
// that the lack of body was intentional.
var errNoBody = errors.New("sentinel error value")
// failureToReadBody is a io.ReadCloser that just returns errNoBody on
// Read. It's swapped in when we don't actually want to consume the
// body, but need a non-nil one, and want to distinguish the error
// from reading the dummy body.
type failureToReadBody struct{}
func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody }
func (failureToReadBody) Close() error { return nil }
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
var b bytes.Buffer
save := resp.Body
savecl := resp.ContentLength
if !body {
resp.Body = failureToReadBody{}
} else if resp.Body == nil {
resp.Body = emptyBody
} else {
save, resp.Body, err = drainBody(resp.Body)
if err != nil {
return
}
}
err = resp.Write(&b)
if err == errNoBody {
err = nil
}
resp.Body = save
resp.ContentLength = savecl
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
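// A brief sketch of using the dump helpers above from outside the package (the
// URL and body are invented for illustration): DumpRequestOut records what the
// Transport would put on the wire, including its added User-Agent and
// Accept-Encoding headers, without touching the network; DumpResponse is the
// analogous call for a response you already hold.
package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"strings"
)

func main() {
	req, err := http.NewRequest("POST", "http://example.com/upload", strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	dump, err := httputil.DumpRequestOut(req, true) // true: include the body in the dump
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dump)
}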


@ -1,263 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httputil
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"runtime"
"strings"
"testing"
)
type dumpTest struct {
Req http.Request
Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
WantDump string
WantDumpOut string
NoBody bool // if true, set DumpRequest{,Out} body to false
}
var dumpTests = []dumpTest{
// HTTP/1.1 => chunked coding; body; empty trailer
{
Req: http.Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/search",
},
ProtoMajor: 1,
ProtoMinor: 1,
TransferEncoding: []string{"chunked"},
},
Body: []byte("abcdef"),
WantDump: "GET /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
},
// Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
// and doesn't add a User-Agent.
{
Req: http.Request{
Method: "GET",
URL: mustParseURL("/foo"),
ProtoMajor: 1,
ProtoMinor: 0,
Header: http.Header{
"X-Foo": []string{"X-Bar"},
},
},
WantDump: "GET /foo HTTP/1.0\r\n" +
"X-Foo: X-Bar\r\n\r\n",
},
{
Req: *mustNewRequest("GET", "http://example.com/foo", nil),
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
// Test that an https URL doesn't try to do an SSL negotiation
// with a bytes.Buffer and hang with all goroutines not
// runnable.
{
Req: *mustNewRequest("GET", "https://example.com/foo", nil),
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
// Request with Body, but Dump requested without it.
{
Req: http.Request{
Method: "POST",
URL: &url.URL{
Scheme: "http",
Host: "post.tld",
Path: "/",
},
ContentLength: 6,
ProtoMajor: 1,
ProtoMinor: 1,
},
Body: []byte("abcdef"),
WantDumpOut: "POST / HTTP/1.1\r\n" +
"Host: post.tld\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Content-Length: 6\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
NoBody: true,
},
}
func TestDumpRequest(t *testing.T) {
numg0 := runtime.NumGoroutine()
for i, tt := range dumpTests {
setBody := func() {
if tt.Body == nil {
return
}
switch b := tt.Body.(type) {
case []byte:
tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
case func() io.ReadCloser:
tt.Req.Body = b()
}
}
setBody()
if tt.Req.Header == nil {
tt.Req.Header = make(http.Header)
}
if tt.WantDump != "" {
setBody()
dump, err := DumpRequest(&tt.Req, !tt.NoBody)
if err != nil {
t.Errorf("DumpRequest #%d: %s", i, err)
continue
}
if string(dump) != tt.WantDump {
t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
continue
}
}
if tt.WantDumpOut != "" {
setBody()
dump, err := DumpRequestOut(&tt.Req, !tt.NoBody)
if err != nil {
t.Errorf("DumpRequestOut #%d: %s", i, err)
continue
}
if string(dump) != tt.WantDumpOut {
t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
continue
}
}
}
if dg := runtime.NumGoroutine() - numg0; dg > 4 {
t.Errorf("Unexpectedly large number of new goroutines: %d new", dg)
}
}
func chunk(s string) string {
return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}
func mustParseURL(s string) *url.URL {
u, err := url.Parse(s)
if err != nil {
panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
}
return u
}
func mustNewRequest(method, url string, body io.Reader) *http.Request {
req, err := http.NewRequest(method, url, body)
if err != nil {
panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err))
}
return req
}
var dumpResTests = []struct {
res *http.Response
body bool
want string
}{
{
res: &http.Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 50,
Header: http.Header{
"Foo": []string{"Bar"},
},
Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used
},
body: false, // to verify we see 50, not empty or 3.
want: `HTTP/1.1 200 OK
Content-Length: 50
Foo: Bar`,
},
{
res: &http.Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 3,
Body: ioutil.NopCloser(strings.NewReader("foo")),
},
body: true,
want: `HTTP/1.1 200 OK
Content-Length: 3
foo`,
},
{
res: &http.Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: -1,
Body: ioutil.NopCloser(strings.NewReader("foo")),
TransferEncoding: []string{"chunked"},
},
body: true,
want: `HTTP/1.1 200 OK
Transfer-Encoding: chunked
3
foo
0`,
},
}
func TestDumpResponse(t *testing.T) {
for i, tt := range dumpResTests {
gotb, err := DumpResponse(tt.res, tt.body)
if err != nil {
t.Errorf("%d. DumpResponse = %v", i, err)
continue
}
got := string(gotb)
got = strings.TrimSpace(got)
got = strings.Replace(got, "\r", "", -1)
if got != tt.want {
t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want)
}
}
}


@ -1,32 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package httputil provides HTTP utility functions, complementing the
// more common ones in the net/http package.
package httputil
import "io"
// NewChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
//
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func NewChunkedReader(r io.Reader) io.Reader {
return newChunkedReader(r)
}
// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream.
//
// NewChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using NewChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// length, both of which are wrong.
func NewChunkedWriter(w io.Writer) io.WriteCloser {
return newChunkedWriter(w)
}
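// A minimal sketch of the exported pair above, round-tripping a body through
// the chunked wire format entirely in memory; the payload strings are
// arbitrary illustration values.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http/httputil"
)

func main() {
	var wire bytes.Buffer
	w := httputil.NewChunkedWriter(&wire)
	w.Write([]byte("hello, "))
	w.Write([]byte("world"))
	w.Close() // emits the terminating 0-length chunk
	fmt.Printf("wire format: %q\n", wire.String())
	// e.g. "7\r\nhello, \r\n5\r\nworld\r\n0\r\n"

	body, err := ioutil.ReadAll(httputil.NewChunkedReader(&wire))
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %s\n", body)
}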


@ -1,429 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httputil
import (
"bufio"
"errors"
"io"
"net"
"net/http"
"net/textproto"
"sync"
)
var (
ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"}
ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"}
ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"}
)
// This is an API usage error - the local side is closed.
// ErrPersistEOF (above) reports that the remote side is closed.
var errClosed = errors.New("i/o operation on closed connection")
// A ServerConn reads requests and sends responses over an underlying
// connection, until the HTTP keepalive logic commands an end. ServerConn
// also allows hijacking the underlying connection by calling Hijack
// to regain control over the connection. ServerConn supports pipe-lining,
// i.e. requests can be read out of sync (but in the same order) while the
// respective responses are sent.
//
// ServerConn is low-level and old. Applications should instead use Server
// in the net/http package.
type ServerConn struct {
lk sync.Mutex // read-write protects the following fields
c net.Conn
r *bufio.Reader
re, we error // read/write errors
lastbody io.ReadCloser
nread, nwritten int
pipereq map[*http.Request]uint
pipe textproto.Pipeline
}
// NewServerConn returns a new ServerConn reading and writing c. If r is not
// nil, it is the buffer to use when reading c.
//
// ServerConn is low-level and old. Applications should instead use Server
// in the net/http package.
func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
if r == nil {
r = bufio.NewReader(c)
}
return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)}
}
// Hijack detaches the ServerConn and returns the underlying connection as well
// as the read-side bufio which may have some left over data. Hijack may be
// called before Read has signaled the end of the keep-alive logic. The user
// should not call Hijack while Read or Write is in progress.
func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) {
sc.lk.Lock()
defer sc.lk.Unlock()
c = sc.c
r = sc.r
sc.c = nil
sc.r = nil
return
}
// Close calls Hijack and then also closes the underlying connection
func (sc *ServerConn) Close() error {
c, _ := sc.Hijack()
if c != nil {
return c.Close()
}
return nil
}
// Read returns the next request on the wire. An ErrPersistEOF is returned if
// it is gracefully determined that there are no more requests (e.g. after the
// first request on an HTTP/1.0 connection, or after a Connection:close on a
// HTTP/1.1 connection).
func (sc *ServerConn) Read() (req *http.Request, err error) {
// Ensure ordered execution of Reads and Writes
id := sc.pipe.Next()
sc.pipe.StartRequest(id)
defer func() {
sc.pipe.EndRequest(id)
if req == nil {
sc.pipe.StartResponse(id)
sc.pipe.EndResponse(id)
} else {
// Remember the pipeline id of this request
sc.lk.Lock()
sc.pipereq[req] = id
sc.lk.Unlock()
}
}()
sc.lk.Lock()
if sc.we != nil { // no point receiving if write-side broken or closed
defer sc.lk.Unlock()
return nil, sc.we
}
if sc.re != nil {
defer sc.lk.Unlock()
return nil, sc.re
}
if sc.r == nil { // connection closed by user in the meantime
defer sc.lk.Unlock()
return nil, errClosed
}
r := sc.r
lastbody := sc.lastbody
sc.lastbody = nil
sc.lk.Unlock()
// Make sure body is fully consumed, even if user does not call body.Close
if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to
// it should return the error that its first invocation
// returned.
err = lastbody.Close()
if err != nil {
sc.lk.Lock()
defer sc.lk.Unlock()
sc.re = err
return nil, err
}
}
req, err = http.ReadRequest(r)
sc.lk.Lock()
defer sc.lk.Unlock()
if err != nil {
if err == io.ErrUnexpectedEOF {
// A close from the opposing client is treated as a
// graceful close, even if there was some unparse-able
// data before the close.
sc.re = ErrPersistEOF
return nil, sc.re
} else {
sc.re = err
return req, err
}
}
sc.lastbody = req.Body
sc.nread++
if req.Close {
sc.re = ErrPersistEOF
return req, sc.re
}
return req, err
}
// Pending returns the number of unanswered requests
// that have been received on the connection.
func (sc *ServerConn) Pending() int {
sc.lk.Lock()
defer sc.lk.Unlock()
return sc.nread - sc.nwritten
}
// Write writes resp in response to req. To close the connection gracefully, set the
// Response.Close field to true. Write should be considered operational until
// it returns an error, regardless of any errors returned on the Read side.
func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
// Retrieve the pipeline ID of this request/response pair
sc.lk.Lock()
id, ok := sc.pipereq[req]
delete(sc.pipereq, req)
if !ok {
sc.lk.Unlock()
return ErrPipeline
}
sc.lk.Unlock()
// Ensure pipeline order
sc.pipe.StartResponse(id)
defer sc.pipe.EndResponse(id)
sc.lk.Lock()
if sc.we != nil {
defer sc.lk.Unlock()
return sc.we
}
if sc.c == nil { // connection closed by user in the meantime
defer sc.lk.Unlock()
return ErrClosed
}
c := sc.c
if sc.nread <= sc.nwritten {
defer sc.lk.Unlock()
return errors.New("persist server pipe count")
}
if resp.Close {
// After signaling a keep-alive close, any pipelined unread
// requests will be lost. It is up to the user to drain them
// before signaling.
sc.re = ErrPersistEOF
}
sc.lk.Unlock()
err := resp.Write(c)
sc.lk.Lock()
defer sc.lk.Unlock()
if err != nil {
sc.we = err
return err
}
sc.nwritten++
return nil
}
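// A hedged sketch of driving ServerConn by hand over one accepted connection
// (the listener address and reply body are invented for illustration): read
// pipelined requests in order and answer each, stopping once Read reports an
// error such as ErrPersistEOF. As the doc comments above stress, this is a
// low-level, legacy path; new code should use http.Server.
package main

import (
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
	"strings"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	sc := httputil.NewServerConn(conn, nil)
	defer sc.Close()
	for {
		req, err := sc.Read()
		if req != nil {
			resp := &http.Response{
				StatusCode:    200,
				ProtoMajor:    1,
				ProtoMinor:    1,
				Request:       req,
				ContentLength: 3,
				Body:          ioutil.NopCloser(strings.NewReader("ok\n")),
			}
			sc.Write(req, resp) // paired with the Read above via the pipeline id
		}
		if err != nil {
			// httputil.ErrPersistEOF means the keep-alive session ended cleanly.
			break
		}
	}
}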
// A ClientConn sends request and receives headers over an underlying
// connection, while respecting the HTTP keepalive logic. ClientConn
// supports hijacking the connection calling Hijack to
// regain control of the underlying net.Conn and deal with it as desired.
//
// ClientConn is low-level and old. Applications should instead use
// Client or Transport in the net/http package.
type ClientConn struct {
lk sync.Mutex // read-write protects the following fields
c net.Conn
r *bufio.Reader
re, we error // read/write errors
lastbody io.ReadCloser
nread, nwritten int
pipereq map[*http.Request]uint
pipe textproto.Pipeline
writeReq func(*http.Request, io.Writer) error
}
// NewClientConn returns a new ClientConn reading and writing c. If r is not
// nil, it is the buffer to use when reading c.
//
// ClientConn is low-level and old. Applications should use Client or
// Transport in the net/http package.
func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
if r == nil {
r = bufio.NewReader(c)
}
return &ClientConn{
c: c,
r: r,
pipereq: make(map[*http.Request]uint),
writeReq: (*http.Request).Write,
}
}
// NewProxyClientConn works like NewClientConn but writes Requests
// using Request's WriteProxy method.
//
// New code should not use NewProxyClientConn. See Client or
// Transport in the net/http package instead.
func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
cc := NewClientConn(c, r)
cc.writeReq = (*http.Request).WriteProxy
return cc
}
// Hijack detaches the ClientConn and returns the underlying connection as well
// as the read-side bufio which may have some left over data. Hijack may be
// called before the user or Read have signaled the end of the keep-alive
// logic. The user should not call Hijack while Read or Write is in progress.
func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
cc.lk.Lock()
defer cc.lk.Unlock()
c = cc.c
r = cc.r
cc.c = nil
cc.r = nil
return
}
// Close calls Hijack and then also closes the underlying connection
func (cc *ClientConn) Close() error {
c, _ := cc.Hijack()
if c != nil {
return c.Close()
}
return nil
}
// Write writes a request. An ErrPersistEOF error is returned if the connection
// has been closed in an HTTP keepalive sense. If req.Close equals true, the
// keepalive connection is logically closed after this request and the opposing
// server is informed. An ErrUnexpectedEOF indicates the remote closed the
// underlying TCP connection, which is usually considered a graceful close.
func (cc *ClientConn) Write(req *http.Request) (err error) {
// Ensure ordered execution of Writes
id := cc.pipe.Next()
cc.pipe.StartRequest(id)
defer func() {
cc.pipe.EndRequest(id)
if err != nil {
cc.pipe.StartResponse(id)
cc.pipe.EndResponse(id)
} else {
// Remember the pipeline id of this request
cc.lk.Lock()
cc.pipereq[req] = id
cc.lk.Unlock()
}
}()
cc.lk.Lock()
if cc.re != nil { // no point sending if read-side closed or broken
defer cc.lk.Unlock()
return cc.re
}
if cc.we != nil {
defer cc.lk.Unlock()
return cc.we
}
if cc.c == nil { // connection closed by user in the meantime
defer cc.lk.Unlock()
return errClosed
}
c := cc.c
if req.Close {
// We write the EOF to the write-side error, because there
// still might be some pipelined reads
cc.we = ErrPersistEOF
}
cc.lk.Unlock()
err = cc.writeReq(req, c)
cc.lk.Lock()
defer cc.lk.Unlock()
if err != nil {
cc.we = err
return err
}
cc.nwritten++
return nil
}
// Pending returns the number of unanswered requests
// that have been sent on the connection.
func (cc *ClientConn) Pending() int {
cc.lk.Lock()
defer cc.lk.Unlock()
return cc.nwritten - cc.nread
}
// Read reads the next response from the wire. A valid response might be
// returned together with an ErrPersistEOF, which means that the remote
// requested that this be the last request serviced. Read can be called
// concurrently with Write, but not with another Read.
func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
// Retrieve the pipeline ID of this request/response pair
cc.lk.Lock()
id, ok := cc.pipereq[req]
delete(cc.pipereq, req)
if !ok {
cc.lk.Unlock()
return nil, ErrPipeline
}
cc.lk.Unlock()
// Ensure pipeline order
cc.pipe.StartResponse(id)
defer cc.pipe.EndResponse(id)
cc.lk.Lock()
if cc.re != nil {
defer cc.lk.Unlock()
return nil, cc.re
}
if cc.r == nil { // connection closed by user in the meantime
defer cc.lk.Unlock()
return nil, errClosed
}
r := cc.r
lastbody := cc.lastbody
cc.lastbody = nil
cc.lk.Unlock()
// Make sure body is fully consumed, even if user does not call body.Close
if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to
// it should return the error that its first invocation
// returned.
err = lastbody.Close()
if err != nil {
cc.lk.Lock()
defer cc.lk.Unlock()
cc.re = err
return nil, err
}
}
resp, err = http.ReadResponse(r, req)
cc.lk.Lock()
defer cc.lk.Unlock()
if err != nil {
cc.re = err
return resp, err
}
cc.lastbody = resp.Body
cc.nread++
if resp.Close {
cc.re = ErrPersistEOF // don't send any more requests
return resp, cc.re
}
return resp, err
}
// Do is a convenience method that writes a request and reads a response.
func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) {
err = cc.Write(req)
if err != nil {
return
}
return cc.Read(req)
}
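The deprecated ServerConn/ClientConn API above is driven by pairing each Write with a later Read for the same request, in pipeline order. A minimal usage sketch, not part of the vendored file (it imports the upstream net/http/httputil package this copy mirrors; the target host is a placeholder):
```
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
)

func main() {
	// Dial a raw TCP connection and wrap it in a ClientConn.
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	cc := httputil.NewClientConn(conn, nil)
	defer cc.Close()

	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	// Write queues the request; Read retrieves its response in pipeline order.
	if err := cc.Write(req); err != nil {
		panic(err)
	}
	resp, err := cc.Read(req)
	if err != nil && err != httputil.ErrPersistEOF {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}
```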

View File

@ -1,211 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP reverse proxy handler
package httputil
import (
"io"
"log"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
// onExitFlushLoop is a callback set by tests to detect the state of the
// flushLoop() goroutine.
var onExitFlushLoop func()
// ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the
// client.
type ReverseProxy struct {
// Director must be a function which modifies
// the request into a new request to be sent
// using Transport. Its response is then copied
// back to the original client unmodified.
Director func(*http.Request)
// The transport used to perform proxy requests.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
// FlushInterval specifies the flush interval
// to flush to the client while copying the
// response body.
// If zero, no periodic flushing is done.
FlushInterval time.Duration
}
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
// URLs to the scheme, host, and base path provided in target. If the
// target's path is "/base" and the incoming request was for "/dir",
// the target request will be for /base/dir.
func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
targetQuery := target.RawQuery
director := func(req *http.Request) {
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
if targetQuery == "" || req.URL.RawQuery == "" {
req.URL.RawQuery = targetQuery + req.URL.RawQuery
} else {
req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
}
}
return &ReverseProxy{Director: director}
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
// Hop-by-hop headers. These are removed when sent to the backend.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
var hopHeaders = []string{
"Connection",
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailers",
"Transfer-Encoding",
"Upgrade",
}
func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
transport := p.Transport
if transport == nil {
transport = http.DefaultTransport
}
outreq := new(http.Request)
*outreq = *req // includes shallow copies of maps, but okay
p.Director(outreq)
outreq.Proto = "HTTP/1.1"
outreq.ProtoMajor = 1
outreq.ProtoMinor = 1
outreq.Close = false
// Remove hop-by-hop headers before sending to the backend. Especially
// important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us. This
// is modifying the same underlying map from req (shallow
// copied above) so we only copy it if necessary.
copiedHeaders := false
for _, h := range hopHeaders {
if outreq.Header.Get(h) != "" {
if !copiedHeaders {
outreq.Header = make(http.Header)
copyHeader(outreq.Header, req.Header)
copiedHeaders = true
}
outreq.Header.Del(h)
}
}
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
outreq.Header.Set("X-Forwarded-For", clientIP)
}
res, err := transport.RoundTrip(outreq)
if err != nil {
log.Printf("http: proxy error: %v", err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
defer res.Body.Close()
for _, h := range hopHeaders {
res.Header.Del(h)
}
copyHeader(rw.Header(), res.Header)
rw.WriteHeader(res.StatusCode)
p.copyResponse(rw, res.Body)
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
if p.FlushInterval != 0 {
if wf, ok := dst.(writeFlusher); ok {
mlw := &maxLatencyWriter{
dst: wf,
latency: p.FlushInterval,
done: make(chan bool),
}
go mlw.flushLoop()
defer mlw.stop()
dst = mlw
}
}
io.Copy(dst, src)
}
type writeFlusher interface {
io.Writer
http.Flusher
}
type maxLatencyWriter struct {
dst writeFlusher
latency time.Duration
lk sync.Mutex // protects Write + Flush
done chan bool
}
func (m *maxLatencyWriter) Write(p []byte) (int, error) {
m.lk.Lock()
defer m.lk.Unlock()
return m.dst.Write(p)
}
func (m *maxLatencyWriter) flushLoop() {
t := time.NewTicker(m.latency)
defer t.Stop()
for {
select {
case <-m.done:
if onExitFlushLoop != nil {
onExitFlushLoop()
}
return
case <-t.C:
m.lk.Lock()
m.dst.Flush()
m.lk.Unlock()
}
}
}
func (m *maxLatencyWriter) stop() { m.done <- true }
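ReverseProxy, removed here as part of the vendored copy, is most often constructed through NewSingleHostReverseProxy. A brief sketch of the typical wiring, with a placeholder backend URL and listen address (not part of the original file):
```
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Proxy every incoming request to a single backend.
	backend, err := url.Parse("http://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	// FlushInterval > 0 enables the maxLatencyWriter path above for
	// streaming responses; zero keeps the default buffered copy.
	proxy.FlushInterval = 0

	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```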

View File

@ -1,213 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Reverse proxy tests.
package httputil
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
)
const fakeHopHeader = "X-Fake-Hop-Header-For-Test"
func init() {
hopHeaders = append(hopHeaders, fakeHopHeader)
}
func TestReverseProxy(t *testing.T) {
const backendResponse = "I am the backend"
const backendStatus = 404
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if len(r.TransferEncoding) > 0 {
t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding)
}
if r.Header.Get("X-Forwarded-For") == "" {
t.Errorf("didn't get X-Forwarded-For header")
}
if c := r.Header.Get("Connection"); c != "" {
t.Errorf("handler got Connection header value %q", c)
}
if c := r.Header.Get("Upgrade"); c != "" {
t.Errorf("handler got Upgrade header value %q", c)
}
if g, e := r.Host, "some-name"; g != e {
t.Errorf("backend got Host header %q, want %q", g, e)
}
w.Header().Set("X-Foo", "bar")
w.Header().Set("Upgrade", "foo")
w.Header().Set(fakeHopHeader, "foo")
w.Header().Add("X-Multi-Value", "foo")
w.Header().Add("X-Multi-Value", "bar")
http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"})
w.WriteHeader(backendStatus)
w.Write([]byte(backendResponse))
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
if err != nil {
t.Fatal(err)
}
proxyHandler := NewSingleHostReverseProxy(backendURL)
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
getReq, _ := http.NewRequest("GET", frontend.URL, nil)
getReq.Host = "some-name"
getReq.Header.Set("Connection", "close")
getReq.Header.Set("Upgrade", "foo")
getReq.Close = true
res, err := http.DefaultClient.Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
if g, e := res.StatusCode, backendStatus; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
if g, e := res.Header.Get("X-Foo"), "bar"; g != e {
t.Errorf("got X-Foo %q; expected %q", g, e)
}
if c := res.Header.Get(fakeHopHeader); c != "" {
t.Errorf("got %s header value %q", fakeHopHeader, c)
}
if g, e := len(res.Header["X-Multi-Value"]), 2; g != e {
t.Errorf("got %d X-Multi-Value header values; expected %d", g, e)
}
if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
t.Fatalf("got %d SetCookies, want %d", g, e)
}
if cookie := res.Cookies()[0]; cookie.Name != "flavor" {
t.Errorf("unexpected cookie %q", cookie.Name)
}
bodyBytes, _ := ioutil.ReadAll(res.Body)
if g, e := string(bodyBytes), backendResponse; g != e {
t.Errorf("got body %q; expected %q", g, e)
}
}
func TestXForwardedFor(t *testing.T) {
const prevForwardedFor = "client ip"
const backendResponse = "I am the backend"
const backendStatus = 404
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("X-Forwarded-For") == "" {
t.Errorf("didn't get X-Forwarded-For header")
}
if !strings.Contains(r.Header.Get("X-Forwarded-For"), prevForwardedFor) {
t.Errorf("X-Forwarded-For didn't contain prior data")
}
w.WriteHeader(backendStatus)
w.Write([]byte(backendResponse))
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
if err != nil {
t.Fatal(err)
}
proxyHandler := NewSingleHostReverseProxy(backendURL)
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
getReq, _ := http.NewRequest("GET", frontend.URL, nil)
getReq.Host = "some-name"
getReq.Header.Set("Connection", "close")
getReq.Header.Set("X-Forwarded-For", prevForwardedFor)
getReq.Close = true
res, err := http.DefaultClient.Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
if g, e := res.StatusCode, backendStatus; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
bodyBytes, _ := ioutil.ReadAll(res.Body)
if g, e := string(bodyBytes), backendResponse; g != e {
t.Errorf("got body %q; expected %q", g, e)
}
}
var proxyQueryTests = []struct {
baseSuffix string // suffix to add to backend URL
reqSuffix string // suffix to add to frontend's request URL
want string // what backend should see for final request URL (without ?)
}{
{"", "", ""},
{"?sta=tic", "?us=er", "sta=tic&us=er"},
{"", "?us=er", "us=er"},
{"?sta=tic", "", "sta=tic"},
}
func TestReverseProxyQuery(t *testing.T) {
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Got-Query", r.URL.RawQuery)
w.Write([]byte("hi"))
}))
defer backend.Close()
for i, tt := range proxyQueryTests {
backendURL, err := url.Parse(backend.URL + tt.baseSuffix)
if err != nil {
t.Fatal(err)
}
frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL))
req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil)
req.Close = true
res, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("%d. Get: %v", i, err)
}
if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e {
t.Errorf("%d. got query %q; expected %q", i, g, e)
}
res.Body.Close()
frontend.Close()
}
}
func TestReverseProxyFlushInterval(t *testing.T) {
const expected = "hi"
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(expected))
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
if err != nil {
t.Fatal(err)
}
proxyHandler := NewSingleHostReverseProxy(backendURL)
proxyHandler.FlushInterval = time.Microsecond
done := make(chan bool)
onExitFlushLoop = func() { done <- true }
defer func() { onExitFlushLoop = nil }()
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
req, _ := http.NewRequest("GET", frontend.URL, nil)
req.Close = true
res, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected {
t.Errorf("got body %q; expected %q", bodyBytes, expected)
}
select {
case <-done:
// OK
case <-time.After(5 * time.Second):
t.Error("maxLatencyWriter flushLoop() never exited")
}
}

View File

@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"testing"
)
func isChar(c rune) bool { return c <= 127 }
func isCtl(c rune) bool { return c <= 31 || c == 127 }
func isSeparator(c rune) bool {
switch c {
case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
return true
}
return false
}
func TestIsToken(t *testing.T) {
for i := 0; i <= 130; i++ {
r := rune(i)
expected := isChar(r) && !isCtl(r) && !isSeparator(r)
if isToken(r) != expected {
t.Errorf("isToken(0x%x) = %v", r, !expected)
}
}
}

View File

@ -1,118 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
. "net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestNextProtoUpgrade(t *testing.T) {
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "path=%s,proto=", r.URL.Path)
if r.TLS != nil {
w.Write([]byte(r.TLS.NegotiatedProtocol))
}
if r.RemoteAddr == "" {
t.Error("request with no RemoteAddr")
}
if r.Body == nil {
t.Errorf("request with nil Body")
}
}))
ts.TLS = &tls.Config{
NextProtos: []string{"unhandled-proto", "tls-0.9"},
}
ts.Config.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){
"tls-0.9": handleTLSProtocol09,
}
ts.StartTLS()
defer ts.Close()
tr := newTLSTransport(t, ts)
defer tr.CloseIdleConnections()
c := &Client{Transport: tr}
// Normal request, without NPN.
{
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if want := "path=/,proto="; string(body) != want {
t.Errorf("plain request = %q; want %q", body, want)
}
}
// Request to an advertised but unhandled NPN protocol.
// Server will hang up.
{
tr.CloseIdleConnections()
tr.TLSClientConfig.NextProtos = []string{"unhandled-proto"}
_, err := c.Get(ts.URL)
if err == nil {
t.Errorf("expected error on unhandled-proto request")
}
}
// Request using the "tls-0.9" protocol, which we register here.
// It is HTTP/0.9 over TLS.
{
tlsConfig := newTLSTransport(t, ts).TLSClientConfig
tlsConfig.NextProtos = []string{"tls-0.9"}
conn, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
if err != nil {
t.Fatal(err)
}
conn.Write([]byte("GET /foo\n"))
body, err := ioutil.ReadAll(conn)
if err != nil {
t.Fatal(err)
}
if want := "path=/foo,proto=tls-0.9"; string(body) != want {
t.Errorf("plain request = %q; want %q", body, want)
}
}
}
// handleTLSProtocol09 implements the HTTP/0.9 protocol over TLS, for the
// TestNextProtoUpgrade test.
func handleTLSProtocol09(srv *Server, conn *tls.Conn, h Handler) {
br := bufio.NewReader(conn)
line, err := br.ReadString('\n')
if err != nil {
return
}
line = strings.TrimSpace(line)
path := strings.TrimPrefix(line, "GET ")
if path == line {
return
}
req, _ := NewRequest("GET", path, nil)
req.Proto = "HTTP/0.9"
req.ProtoMajor = 0
req.ProtoMinor = 9
rw := &http09Writer{conn, make(Header)}
h.ServeHTTP(rw, req)
}
type http09Writer struct {
io.Writer
h Header
}
func (w http09Writer) Header() Header { return w.h }
func (w http09Writer) WriteHeader(int) {} // no headers
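The test above registers a TLSNextProto handler on an httptest server; on a plain http.Server the same hook looks roughly like the sketch below (protocol name, certificate paths, and address are placeholders, not from the original file):
```
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		// Regular HTTP/1.1 requests still go through the normal handler.
		Handler: http.NewServeMux(),
		// When TLS negotiates "my-proto", the server hands the connection
		// to this function instead of its HTTP serving loop.
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){
			"my-proto": func(s *http.Server, conn *tls.Conn, h http.Handler) {
				defer conn.Close()
				conn.Write([]byte("hello from my-proto\n"))
			},
		},
		TLSConfig: &tls.Config{NextProtos: []string{"my-proto", "http/1.1"}},
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```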

View File

@ -1,205 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pprof serves via its HTTP server runtime profiling data
// in the format expected by the pprof visualization tool.
// For more information about pprof, see
// http://code.google.com/p/google-perftools/.
//
// The package is typically only imported for the side effect of
// registering its HTTP handlers.
// The handled paths all begin with /debug/pprof/.
//
// To use pprof, link this package into your program:
// import _ "net/http/pprof"
//
// If your application is not already running an http server, you
// need to start one. Add "net/http" and "log" to your imports and
// the following code to your main function:
//
// go func() {
// log.Println(http.ListenAndServe("localhost:6060", nil))
// }()
//
// Then use the pprof tool to look at the heap profile:
//
// go tool pprof http://localhost:6060/debug/pprof/heap
//
// Or to look at a 30-second CPU profile:
//
// go tool pprof http://localhost:6060/debug/pprof/profile
//
// Or to look at the goroutine blocking profile:
//
// go tool pprof http://localhost:6060/debug/pprof/block
//
// To view all available profiles, open http://localhost:6060/debug/pprof/
// in your browser.
//
// For a study of the facility in action, visit
//
// http://blog.golang.org/2011/06/profiling-go-programs.html
//
package pprof
import (
"bufio"
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"os"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
)
func init() {
http.Handle("/debug/pprof/", http.HandlerFunc(Index))
http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
}
// Cmdline responds with the running program's
// command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
if sec == 0 {
sec = 30
}
// Set Content Type assuming StartCPUProfile will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
if err := pprof.StartCPUProfile(w); err != nil {
// StartCPUProfile failed, so no writes yet.
// Can change header back to text content
// and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
return
}
time.Sleep(time.Duration(sec) * time.Second)
pprof.StopCPUProfile()
}
// Symbol looks up the program counters listed in the request,
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// We have to read the whole POST body before
// writing any output. Buffer the output here.
var buf bytes.Buffer
// We don't know how many symbols we have, but we
// do have symbol information. Pprof only cares whether
// this number is 0 (no symbols available) or > 0.
fmt.Fprintf(&buf, "num_symbols: 1\n")
var b *bufio.Reader
if r.Method == "POST" {
b = bufio.NewReader(r.Body)
} else {
b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
}
for {
word, err := b.ReadSlice('+')
if err == nil {
word = word[0 : len(word)-1] // trim +
}
pc, _ := strconv.ParseUint(string(word), 0, 64)
if pc != 0 {
f := runtime.FuncForPC(uintptr(pc))
if f != nil {
fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
}
}
// Wait until here to check for err; the last
// symbol will have an err because it doesn't end in +.
if err != nil {
if err != io.EOF {
fmt.Fprintf(&buf, "reading request: %v\n", err)
}
break
}
}
w.Write(buf.Bytes())
}
// Handler returns an HTTP handler that serves the named profile.
func Handler(name string) http.Handler {
return handler(name)
}
type handler string
func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
debug, _ := strconv.Atoi(r.FormValue("debug"))
p := pprof.Lookup(string(name))
if p == nil {
w.WriteHeader(404)
fmt.Fprintf(w, "Unknown profile: %s\n", name)
return
}
p.WriteTo(w, debug)
return
}
// Index responds with the pprof-formatted profile named by the request.
// For example, "/debug/pprof/heap" serves the "heap" profile.
// Index responds to a request for "/debug/pprof/" with an HTML page
// listing the available profiles.
func Index(w http.ResponseWriter, r *http.Request) {
if strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
name := strings.TrimPrefix(r.URL.Path, "/debug/pprof/")
if name != "" {
handler(name).ServeHTTP(w, r)
return
}
}
profiles := pprof.Profiles()
if err := indexTmpl.Execute(w, profiles); err != nil {
log.Print(err)
}
}
var indexTmpl = template.Must(template.New("index").Parse(`<html>
<head>
<title>/debug/pprof/</title>
</head>
/debug/pprof/<br>
<br>
<body>
profiles:<br>
<table>
{{range .}}
<tr><td align=right>{{.Count}}<td><a href="/debug/pprof/{{.Name}}?debug=1">{{.Name}}</a>
{{end}}
</table>
<br>
<a href="/debug/pprof/goroutine?debug=2">full goroutine stack dump</a><br>
</body>
</html>
`))
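The init function above registers the handlers on http.DefaultServeMux as a side effect of importing the package; when serving profiles from a custom mux, the exported functions can be wired up by hand. A sketch (the listen address is a placeholder, not part of the original file):
```
package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	mux := http.NewServeMux()
	// Mirror the registrations performed by this package's init, but on a
	// private mux instead of http.DefaultServeMux.
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	// Named profiles can also be exposed individually via Handler.
	mux.Handle("/debug/pprof/heap", pprof.Handler("heap"))

	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}
```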

View File

@ -1,81 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"net/url"
"os"
"testing"
)
// TODO(mattn):
// test ProxyAuth
var UseProxyTests = []struct {
host string
match bool
}{
// Never proxy localhost:
{"localhost:80", false},
{"127.0.0.1", false},
{"127.0.0.2", false},
{"[::1]", false},
{"[::2]", true}, // not a loopback address
{"barbaz.net", false}, // match as .barbaz.net
{"foobar.com", false}, // have a port but match
{"foofoobar.com", true}, // not match as a part of foobar.com
{"baz.com", true}, // not match as a part of barbaz.com
{"localhost.net", true}, // not match as suffix of address
{"local.localhost", true}, // not match as prefix as address
{"barbarbaz.net", true}, // not match because NO_PROXY have a '.'
{"www.foobar.com", false}, // match because NO_PROXY includes "foobar.com"
}
func TestUseProxy(t *testing.T) {
ResetProxyEnv()
os.Setenv("NO_PROXY", "foobar.com, .barbaz.net")
for _, test := range UseProxyTests {
if useProxy(test.host+":80") != test.match {
t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
}
}
}
var cacheKeysTests = []struct {
proxy string
scheme string
addr string
key string
}{
{"", "http", "foo.com", "|http|foo.com"},
{"", "https", "foo.com", "|https|foo.com"},
{"http://foo.com", "http", "foo.com", "http://foo.com|http|"},
{"http://foo.com", "https", "foo.com", "http://foo.com|https|foo.com"},
}
func TestCacheKeys(t *testing.T) {
for _, tt := range cacheKeysTests {
var proxy *url.URL
if tt.proxy != "" {
u, err := url.Parse(tt.proxy)
if err != nil {
t.Fatal(err)
}
proxy = u
}
cm := connectMethod{proxy, tt.scheme, tt.addr}
if got := cm.key().String(); got != tt.key {
t.Fatalf("{%q, %q, %q} cache key = %q; want %q", tt.proxy, tt.scheme, tt.addr, got, tt.key)
}
}
}
func ResetProxyEnv() {
for _, v := range []string{"HTTP_PROXY", "http_proxy", "NO_PROXY", "no_proxy"} {
os.Setenv(v, "")
}
ResetCachedEnvironment()
}
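The useProxy helper exercised above backs http.ProxyFromEnvironment, which is what most callers see. A small sketch of the user-facing behaviour of the same NO_PROXY rules (the proxy URL and hosts are placeholders, not from the original file):
```
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	os.Setenv("HTTP_PROXY", "http://proxy.internal:3128")
	os.Setenv("NO_PROXY", "foobar.com, .barbaz.net")

	for _, target := range []string{"http://www.foobar.com/", "http://example.org/"} {
		req, _ := http.NewRequest("GET", target, nil)
		// ProxyFromEnvironment returns nil when NO_PROXY says to connect directly.
		proxyURL, _ := http.ProxyFromEnvironment(req)
		if proxyURL == nil {
			fmt.Println(target, "-> direct (NO_PROXY match)")
		} else {
			fmt.Println(target, "->", proxyURL)
		}
	}
}
```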

View File

@ -1,79 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"testing"
)
var ParseRangeTests = []struct {
s string
length int64
r []httpRange
}{
{"", 0, nil},
{"", 1000, nil},
{"foo", 0, nil},
{"bytes=", 0, nil},
{"bytes=7", 10, nil},
{"bytes= 7 ", 10, nil},
{"bytes=1-", 0, nil},
{"bytes=5-4", 10, nil},
{"bytes=0-2,5-4", 10, nil},
{"bytes=2-5,4-3", 10, nil},
{"bytes=--5,4--3", 10, nil},
{"bytes=A-", 10, nil},
{"bytes=A- ", 10, nil},
{"bytes=A-Z", 10, nil},
{"bytes= -Z", 10, nil},
{"bytes=5-Z", 10, nil},
{"bytes=Ran-dom, garbage", 10, nil},
{"bytes=0x01-0x02", 10, nil},
{"bytes= ", 10, nil},
{"bytes= , , , ", 10, nil},
{"bytes=0-9", 10, []httpRange{{0, 10}}},
{"bytes=0-", 10, []httpRange{{0, 10}}},
{"bytes=5-", 10, []httpRange{{5, 5}}},
{"bytes=0-20", 10, []httpRange{{0, 10}}},
{"bytes=15-,0-5", 10, nil},
{"bytes=1-2,5-", 10, []httpRange{{1, 2}, {5, 5}}},
{"bytes=-2 , 7-", 11, []httpRange{{9, 2}, {7, 4}}},
{"bytes=0-0 ,2-2, 7-", 11, []httpRange{{0, 1}, {2, 1}, {7, 4}}},
{"bytes=-5", 10, []httpRange{{5, 5}}},
{"bytes=-15", 10, []httpRange{{0, 10}}},
{"bytes=0-499", 10000, []httpRange{{0, 500}}},
{"bytes=500-999", 10000, []httpRange{{500, 500}}},
{"bytes=-500", 10000, []httpRange{{9500, 500}}},
{"bytes=9500-", 10000, []httpRange{{9500, 500}}},
{"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
{"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
{"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
// Match Apache laxity:
{"bytes= 1 -2 , 4- 5, 7 - 8 , ,,", 11, []httpRange{{1, 2}, {4, 2}, {7, 2}}},
}
func TestParseRange(t *testing.T) {
for _, test := range ParseRangeTests {
r := test.r
ranges, err := parseRange(test.s, test.length)
if err != nil && r != nil {
t.Errorf("parseRange(%q) returned error %q", test.s, err)
}
if len(ranges) != len(r) {
t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r))
continue
}
for i := range r {
if ranges[i].start != r[i].start {
t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start)
}
if ranges[i].length != r[i].length {
t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length)
}
}
}
}
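parseRange is what lets http.ServeContent answer Range requests such as "bytes=0-499" from the table above. A sketch of that user-visible path (the file name and address are placeholders, not from the original file):
```
package main

import (
	"log"
	"net/http"
	"strings"
	"time"
)

func main() {
	http.HandleFunc("/data", func(w http.ResponseWriter, r *http.Request) {
		content := strings.NewReader("0123456789") // length 10, like most test cases
		// A request carrying "Range: bytes=0-4" gets a 206 Partial Content
		// response with body "01234"; ranges that cannot be satisfied are
		// rejected with 416.
		http.ServeContent(w, r, "data.txt", time.Now(), content)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```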

View File

@ -1,331 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bufio"
"bytes"
"fmt"
"io"
"net/url"
"reflect"
"testing"
)
type reqTest struct {
Raw string
Req *Request
Body string
Trailer Header
Error string
}
var noError = ""
var noBody = ""
var noTrailer Header = nil
var reqTests = []reqTest{
// Baseline test; All Request fields included for template use
{
"GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
"Host: www.techcrunch.com\r\n" +
"User-Agent: Fake\r\n" +
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
"Accept-Language: en-us,en;q=0.5\r\n" +
"Accept-Encoding: gzip,deflate\r\n" +
"Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
"Keep-Alive: 300\r\n" +
"Content-Length: 7\r\n" +
"Proxy-Connection: keep-alive\r\n\r\n" +
"abcdef\n???",
&Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.techcrunch.com",
Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
"Accept-Language": {"en-us,en;q=0.5"},
"Accept-Encoding": {"gzip,deflate"},
"Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
"Keep-Alive": {"300"},
"Proxy-Connection": {"keep-alive"},
"Content-Length": {"7"},
"User-Agent": {"Fake"},
},
Close: false,
ContentLength: 7,
Host: "www.techcrunch.com",
RequestURI: "http://www.techcrunch.com/",
},
"abcdef\n",
noTrailer,
noError,
},
// GET request with no body (the normal case)
{
"GET / HTTP/1.1\r\n" +
"Host: foo.com\r\n\r\n",
&Request{
Method: "GET",
URL: &url.URL{
Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: false,
ContentLength: 0,
Host: "foo.com",
RequestURI: "/",
},
noBody,
noTrailer,
noError,
},
// Tests that we don't parse a path that looks like a
// scheme-relative URI as a scheme-relative URI.
{
"GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" +
"Host: test\r\n\r\n",
&Request{
Method: "GET",
URL: &url.URL{
Path: "//user@host/is/actually/a/path/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: false,
ContentLength: 0,
Host: "test",
RequestURI: "//user@host/is/actually/a/path/",
},
noBody,
noTrailer,
noError,
},
// Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2)
{
"GET ../../../../etc/passwd HTTP/1.1\r\n" +
"Host: test\r\n\r\n",
nil,
noBody,
noTrailer,
"parse ../../../../etc/passwd: invalid URI for request",
},
// Tests missing URL:
{
"GET HTTP/1.1\r\n" +
"Host: test\r\n\r\n",
nil,
noBody,
noTrailer,
"parse : empty url",
},
// Tests chunked body with trailer:
{
"POST / HTTP/1.1\r\n" +
"Host: foo.com\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
"3\r\nfoo\r\n" +
"3\r\nbar\r\n" +
"0\r\n" +
"Trailer-Key: Trailer-Value\r\n" +
"\r\n",
&Request{
Method: "POST",
URL: &url.URL{
Path: "/",
},
TransferEncoding: []string{"chunked"},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
ContentLength: -1,
Host: "foo.com",
RequestURI: "/",
},
"foobar",
Header{
"Trailer-Key": {"Trailer-Value"},
},
noError,
},
// CONNECT request with domain name:
{
"CONNECT www.google.com:443 HTTP/1.1\r\n\r\n",
&Request{
Method: "CONNECT",
URL: &url.URL{
Host: "www.google.com:443",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: false,
ContentLength: 0,
Host: "www.google.com:443",
RequestURI: "www.google.com:443",
},
noBody,
noTrailer,
noError,
},
// CONNECT request with IP address:
{
"CONNECT 127.0.0.1:6060 HTTP/1.1\r\n\r\n",
&Request{
Method: "CONNECT",
URL: &url.URL{
Host: "127.0.0.1:6060",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: false,
ContentLength: 0,
Host: "127.0.0.1:6060",
RequestURI: "127.0.0.1:6060",
},
noBody,
noTrailer,
noError,
},
// CONNECT request for RPC:
{
"CONNECT /_goRPC_ HTTP/1.1\r\n\r\n",
&Request{
Method: "CONNECT",
URL: &url.URL{
Path: "/_goRPC_",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: false,
ContentLength: 0,
Host: "",
RequestURI: "/_goRPC_",
},
noBody,
noTrailer,
noError,
},
// SSDP Notify request. golang.org/issue/3692
{
"NOTIFY * HTTP/1.1\r\nServer: foo\r\n\r\n",
&Request{
Method: "NOTIFY",
URL: &url.URL{
Path: "*",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"Server": []string{"foo"},
},
Close: false,
ContentLength: 0,
RequestURI: "*",
},
noBody,
noTrailer,
noError,
},
// OPTIONS request. Similar to golang.org/issue/3692
{
"OPTIONS * HTTP/1.1\r\nServer: foo\r\n\r\n",
&Request{
Method: "OPTIONS",
URL: &url.URL{
Path: "*",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"Server": []string{"foo"},
},
Close: false,
ContentLength: 0,
RequestURI: "*",
},
noBody,
noTrailer,
noError,
},
}
func TestReadRequest(t *testing.T) {
for i := range reqTests {
tt := &reqTests[i]
var braw bytes.Buffer
braw.WriteString(tt.Raw)
req, err := ReadRequest(bufio.NewReader(&braw))
if err != nil {
if err.Error() != tt.Error {
t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error)
}
continue
}
rbody := req.Body
req.Body = nil
diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req)
var bout bytes.Buffer
if rbody != nil {
_, err := io.Copy(&bout, rbody)
if err != nil {
t.Fatalf("#%d. copying body: %v", i, err)
}
rbody.Close()
}
body := bout.String()
if body != tt.Body {
t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
}
if !reflect.DeepEqual(tt.Trailer, req.Trailer) {
t.Errorf("#%d. Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer)
}
}
}

View File

@ -1,610 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"testing"
)
func TestQuery(t *testing.T) {
req := &Request{Method: "GET"}
req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar")
if q := req.FormValue("q"); q != "foo" {
t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
}
}
func TestPostQuery(t *testing.T) {
req, _ := NewRequest("POST", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not",
strings.NewReader("z=post&both=y&prio=2&empty="))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value")
if q := req.FormValue("q"); q != "foo" {
t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
}
if z := req.FormValue("z"); z != "post" {
t.Errorf(`req.FormValue("z") = %q, want "post"`, z)
}
if bq, found := req.PostForm["q"]; found {
t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq)
}
if bz := req.PostFormValue("z"); bz != "post" {
t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz)
}
if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) {
t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs)
}
if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) {
t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both)
}
if prio := req.FormValue("prio"); prio != "2" {
t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio)
}
if empty := req.FormValue("empty"); empty != "" {
t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty)
}
}
func TestPatchQuery(t *testing.T) {
req, _ := NewRequest("PATCH", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not",
strings.NewReader("z=post&both=y&prio=2&empty="))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value")
if q := req.FormValue("q"); q != "foo" {
t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
}
if z := req.FormValue("z"); z != "post" {
t.Errorf(`req.FormValue("z") = %q, want "post"`, z)
}
if bq, found := req.PostForm["q"]; found {
t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq)
}
if bz := req.PostFormValue("z"); bz != "post" {
t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz)
}
if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) {
t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs)
}
if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) {
t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both)
}
if prio := req.FormValue("prio"); prio != "2" {
t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio)
}
if empty := req.FormValue("empty"); empty != "" {
t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty)
}
}
type stringMap map[string][]string
type parseContentTypeTest struct {
shouldError bool
contentType stringMap
}
var parseContentTypeTests = []parseContentTypeTest{
{false, stringMap{"Content-Type": {"text/plain"}}},
// Empty content type is legal - should be treated as
// application/octet-stream (RFC 2616, section 7.2.1)
{false, stringMap{}},
{true, stringMap{"Content-Type": {"text/plain; boundary="}}},
{false, stringMap{"Content-Type": {"application/unknown"}}},
}
func TestParseFormUnknownContentType(t *testing.T) {
for i, test := range parseContentTypeTests {
req := &Request{
Method: "POST",
Header: Header(test.contentType),
Body: ioutil.NopCloser(strings.NewReader("body")),
}
err := req.ParseForm()
switch {
case err == nil && test.shouldError:
t.Errorf("test %d should have returned error", i)
case err != nil && !test.shouldError:
t.Errorf("test %d should not have returned error, got %v", i, err)
}
}
}
func TestParseFormInitializeOnError(t *testing.T) {
nilBody, _ := NewRequest("POST", "http://www.google.com/search?q=foo", nil)
tests := []*Request{
nilBody,
{Method: "GET", URL: nil},
}
for i, req := range tests {
err := req.ParseForm()
if req.Form == nil {
t.Errorf("%d. Form not initialized, error %v", i, err)
}
if req.PostForm == nil {
t.Errorf("%d. PostForm not initialized, error %v", i, err)
}
}
}
func TestMultipartReader(t *testing.T) {
req := &Request{
Method: "POST",
Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}},
Body: ioutil.NopCloser(new(bytes.Buffer)),
}
multipart, err := req.MultipartReader()
if multipart == nil {
t.Errorf("expected multipart; error: %v", err)
}
req.Header = Header{"Content-Type": {"text/plain"}}
multipart, err = req.MultipartReader()
if multipart != nil {
t.Error("unexpected multipart for text/plain")
}
}
func TestParseMultipartForm(t *testing.T) {
req := &Request{
Method: "POST",
Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}},
Body: ioutil.NopCloser(new(bytes.Buffer)),
}
err := req.ParseMultipartForm(25)
if err == nil {
t.Error("expected multipart EOF, got nil")
}
req.Header = Header{"Content-Type": {"text/plain"}}
err = req.ParseMultipartForm(25)
if err != ErrNotMultipart {
t.Error("expected ErrNotMultipart for text/plain")
}
}
func TestRedirect(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.URL.Path {
case "/":
w.Header().Set("Location", "/foo/")
w.WriteHeader(StatusSeeOther)
case "/foo/":
fmt.Fprintf(w, "foo")
default:
w.WriteHeader(StatusBadRequest)
}
}))
defer ts.Close()
var end = regexp.MustCompile("/foo/$")
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
url := r.Request.URL.String()
if r.StatusCode != 200 || !end.MatchString(url) {
t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url)
}
}
func TestSetBasicAuth(t *testing.T) {
r, _ := NewRequest("GET", "http://example.com/", nil)
r.SetBasicAuth("Aladdin", "open sesame")
if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e {
t.Errorf("got header %q, want %q", g, e)
}
}
func TestMultipartRequest(t *testing.T) {
// Test that we can read the values and files of a
// multipart request with FormValue and FormFile,
// and that ParseMultipartForm can be called multiple times.
req := newTestMultipartRequest(t)
if err := req.ParseMultipartForm(25); err != nil {
t.Fatal("ParseMultipartForm first call:", err)
}
defer req.MultipartForm.RemoveAll()
validateTestMultipartContents(t, req, false)
if err := req.ParseMultipartForm(25); err != nil {
t.Fatal("ParseMultipartForm second call:", err)
}
validateTestMultipartContents(t, req, false)
}
func TestMultipartRequestAuto(t *testing.T) {
// Test that FormValue and FormFile automatically invoke
// ParseMultipartForm and return the right values.
req := newTestMultipartRequest(t)
defer func() {
if req.MultipartForm != nil {
req.MultipartForm.RemoveAll()
}
}()
validateTestMultipartContents(t, req, true)
}
func TestMissingFileMultipartRequest(t *testing.T) {
// Test that FormFile returns an error if
// the named file is missing.
req := newTestMultipartRequest(t)
testMissingFile(t, req)
}
// Test that FormValue invokes ParseMultipartForm.
func TestFormValueCallsParseMultipartForm(t *testing.T) {
req, _ := NewRequest("POST", "http://www.google.com/", strings.NewReader("z=post"))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value")
if req.Form != nil {
t.Fatal("Unexpected request Form, want nil")
}
req.FormValue("z")
if req.Form == nil {
t.Fatal("ParseMultipartForm not called by FormValue")
}
}
// Test that FormFile invokes ParseMultipartForm.
func TestFormFileCallsParseMultipartForm(t *testing.T) {
req := newTestMultipartRequest(t)
if req.Form != nil {
t.Fatal("Unexpected request Form, want nil")
}
req.FormFile("")
if req.Form == nil {
t.Fatal("ParseMultipartForm not called by FormFile")
}
}
// Test that ParseMultipartForm errors if called
// after MultipartReader on the same request.
func TestParseMultipartFormOrder(t *testing.T) {
req := newTestMultipartRequest(t)
if _, err := req.MultipartReader(); err != nil {
t.Fatalf("MultipartReader: %v", err)
}
if err := req.ParseMultipartForm(1024); err == nil {
t.Fatal("expected an error from ParseMultipartForm after call to MultipartReader")
}
}
// Test that MultipartReader errors if called
// after ParseMultipartForm on the same request.
func TestMultipartReaderOrder(t *testing.T) {
req := newTestMultipartRequest(t)
if err := req.ParseMultipartForm(25); err != nil {
t.Fatalf("ParseMultipartForm: %v", err)
}
defer req.MultipartForm.RemoveAll()
if _, err := req.MultipartReader(); err == nil {
t.Fatal("expected an error from MultipartReader after call to ParseMultipartForm")
}
}
// Test that FormFile errors if called after
// MultipartReader on the same request.
func TestFormFileOrder(t *testing.T) {
req := newTestMultipartRequest(t)
if _, err := req.MultipartReader(); err != nil {
t.Fatalf("MultipartReader: %v", err)
}
if _, _, err := req.FormFile(""); err == nil {
t.Fatal("expected an error from FormFile after call to MultipartReader")
}
}
var readRequestErrorTests = []struct {
in string
err error
}{
{"GET / HTTP/1.1\r\nheader:foo\r\n\r\n", nil},
{"GET / HTTP/1.1\r\nheader:foo\r\n", io.ErrUnexpectedEOF},
{"", io.EOF},
}
func TestReadRequestErrors(t *testing.T) {
for i, tt := range readRequestErrorTests {
_, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.in)))
if err != tt.err {
t.Errorf("%d. got error = %v; want %v", i, err, tt.err)
}
}
}
func TestNewRequestHost(t *testing.T) {
req, err := NewRequest("GET", "http://localhost:1234/", nil)
if err != nil {
t.Fatal(err)
}
if req.Host != "localhost:1234" {
t.Errorf("Host = %q; want localhost:1234", req.Host)
}
}
func TestNewRequestContentLength(t *testing.T) {
readByte := func(r io.Reader) io.Reader {
var b [1]byte
r.Read(b[:])
return r
}
tests := []struct {
r io.Reader
want int64
}{
{bytes.NewReader([]byte("123")), 3},
{bytes.NewBuffer([]byte("1234")), 4},
{strings.NewReader("12345"), 5},
// Not detected:
{struct{ io.Reader }{strings.NewReader("xyz")}, 0},
{io.NewSectionReader(strings.NewReader("x"), 0, 6), 0},
{readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0},
}
for _, tt := range tests {
req, err := NewRequest("POST", "http://localhost/", tt.r)
if err != nil {
t.Fatal(err)
}
if req.ContentLength != tt.want {
t.Errorf("ContentLength(%T) = %d; want %d", tt.r, req.ContentLength, tt.want)
}
}
}
var parseHTTPVersionTests = []struct {
vers string
major, minor int
ok bool
}{
{"HTTP/0.9", 0, 9, true},
{"HTTP/1.0", 1, 0, true},
{"HTTP/1.1", 1, 1, true},
{"HTTP/3.14", 3, 14, true},
{"HTTP", 0, 0, false},
{"HTTP/one.one", 0, 0, false},
{"HTTP/1.1/", 0, 0, false},
{"HTTP/-1,0", 0, 0, false},
{"HTTP/0,-1", 0, 0, false},
{"HTTP/", 0, 0, false},
{"HTTP/1,1", 0, 0, false},
}
func TestParseHTTPVersion(t *testing.T) {
for _, tt := range parseHTTPVersionTests {
major, minor, ok := ParseHTTPVersion(tt.vers)
if ok != tt.ok || major != tt.major || minor != tt.minor {
type version struct {
major, minor int
ok bool
}
t.Errorf("failed to parse %q, expected: %#v, got %#v", tt.vers, version{tt.major, tt.minor, tt.ok}, version{major, minor, ok})
}
}
}
type logWrites struct {
t *testing.T
dst *[]string
}
func (l logWrites) WriteByte(c byte) error {
l.t.Fatalf("unexpected WriteByte call")
return nil
}
func (l logWrites) Write(p []byte) (n int, err error) {
*l.dst = append(*l.dst, string(p))
return len(p), nil
}
func TestRequestWriteBufferedWriter(t *testing.T) {
got := []string{}
req, _ := NewRequest("GET", "http://foo.com/", nil)
req.Write(logWrites{t, &got})
want := []string{
"GET / HTTP/1.1\r\n",
"Host: foo.com\r\n",
"User-Agent: " + DefaultUserAgent + "\r\n",
"\r\n",
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Writes = %q\n Want = %q", got, want)
}
}
func testMissingFile(t *testing.T, req *Request) {
f, fh, err := req.FormFile("missing")
if f != nil {
t.Errorf("FormFile file = %v, want nil", f)
}
if fh != nil {
t.Errorf("FormFile file header = %q, want nil", fh)
}
if err != ErrMissingFile {
t.Errorf("FormFile err = %q, want ErrMissingFile", err)
}
}
func newTestMultipartRequest(t *testing.T) *Request {
b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1))
req, err := NewRequest("POST", "/", b)
if err != nil {
t.Fatal("NewRequest:", err)
}
ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary)
req.Header.Set("Content-type", ctype)
return req
}
func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
if g, e := req.FormValue("texta"), textaValue; g != e {
t.Errorf("texta value = %q, want %q", g, e)
}
if g, e := req.FormValue("textb"), textbValue; g != e {
t.Errorf("textb value = %q, want %q", g, e)
}
if g := req.FormValue("missing"); g != "" {
t.Errorf("missing value = %q, want empty string", g)
}
assertMem := func(n string, fd multipart.File) {
if _, ok := fd.(*os.File); ok {
t.Error(n, " is *os.File, should not be")
}
}
fda := testMultipartFile(t, req, "filea", "filea.txt", fileaContents)
defer fda.Close()
assertMem("filea", fda)
fdb := testMultipartFile(t, req, "fileb", "fileb.txt", filebContents)
defer fdb.Close()
if allMem {
assertMem("fileb", fdb)
} else {
if _, ok := fdb.(*os.File); !ok {
t.Errorf("fileb has unexpected underlying type %T", fdb)
}
}
testMissingFile(t, req)
}
func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File {
f, fh, err := req.FormFile(key)
if err != nil {
t.Fatalf("FormFile(%q): %q", key, err)
}
if fh.Filename != expectFilename {
t.Errorf("filename = %q, want %q", fh.Filename, expectFilename)
}
var b bytes.Buffer
_, err = io.Copy(&b, f)
if err != nil {
t.Fatal("copying contents:", err)
}
if g := b.String(); g != expectContent {
t.Errorf("contents = %q, want %q", g, expectContent)
}
return f
}
const (
fileaContents = "This is a test file."
filebContents = "Another test file."
textaValue = "foo"
textbValue = "bar"
boundary = `MyBoundary`
)
const message = `
--MyBoundary
Content-Disposition: form-data; name="filea"; filename="filea.txt"
Content-Type: text/plain
` + fileaContents + `
--MyBoundary
Content-Disposition: form-data; name="fileb"; filename="fileb.txt"
Content-Type: text/plain
` + filebContents + `
--MyBoundary
Content-Disposition: form-data; name="texta"
` + textaValue + `
--MyBoundary
Content-Disposition: form-data; name="textb"
` + textbValue + `
--MyBoundary--
`
func benchmarkReadRequest(b *testing.B, request string) {
request = request + "\n" // final \n
request = strings.Replace(request, "\n", "\r\n", -1) // expand \n to \r\n
b.SetBytes(int64(len(request)))
r := bufio.NewReader(&infiniteReader{buf: []byte(request)})
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := ReadRequest(r)
if err != nil {
b.Fatalf("failed to read request: %v", err)
}
}
}
// infiniteReader satisfies Read requests as if the contents of buf
// loop indefinitely.
type infiniteReader struct {
buf []byte
offset int
}
func (r *infiniteReader) Read(b []byte) (int, error) {
n := copy(b, r.buf[r.offset:])
r.offset = (r.offset + n) % len(r.buf)
return n, nil
}
func BenchmarkReadRequestChrome(b *testing.B) {
// https://github.com/felixge/node-http-perf/blob/master/fixtures/get.http
benchmarkReadRequest(b, `GET / HTTP/1.1
Host: localhost:8080
Connection: keep-alive
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17
Accept-Encoding: gzip,deflate,sdch
Accept-Language: en-US,en;q=0.8
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
Cookie: __utma=1.1978842379.1323102373.1323102373.1323102373.1; EPi:NumberOfVisits=1,2012-02-28T13:42:18; CrmSession=5b707226b9563e1bc69084d07a107c98; plushContainerWidth=100%25; plushNoTopMenu=0; hudson_auto_refresh=false
`)
}
func BenchmarkReadRequestCurl(b *testing.B) {
// curl http://localhost:8080/
benchmarkReadRequest(b, `GET / HTTP/1.1
User-Agent: curl/7.27.0
Host: localhost:8080
Accept: */*
`)
}
func BenchmarkReadRequestApachebench(b *testing.B) {
// ab -n 1 -c 1 http://localhost:8080/
benchmarkReadRequest(b, `GET / HTTP/1.0
Host: localhost:8080
User-Agent: ApacheBench/2.3
Accept: */*
`)
}
func BenchmarkReadRequestSiege(b *testing.B) {
// siege -r 1 -c 1 http://localhost:8080/
benchmarkReadRequest(b, `GET / HTTP/1.1
Host: localhost:8080
Accept: */*
Accept-Encoding: gzip
User-Agent: JoeDog/1.00 [en] (X11; I; Siege 2.70)
Connection: keep-alive
`)
}
func BenchmarkReadRequestWrk(b *testing.B) {
// wrk -t 1 -r 1 -c 1 http://localhost:8080/
benchmarkReadRequest(b, `GET / HTTP/1.1
Host: localhost:8080
`)
}
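The multipart fixtures above spell the body out by hand; the same kind of payload is normally produced with mime/multipart, as in this sketch (field names mirror the fixtures, the URL is a placeholder, and the code is not part of the original file):
```
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// One file part and one text part, mirroring "filea" and "texta" above.
	fw, _ := w.CreateFormFile("filea", "filea.txt")
	fw.Write([]byte("This is a test file."))
	w.WriteField("texta", "foo")
	w.Close()

	req, _ := http.NewRequest("POST", "http://example.com/upload", &buf)
	req.Header.Set("Content-Type", w.FormDataContentType())
	// The header now carries the generated boundary, analogous to MyBoundary.
	fmt.Println(req.Header.Get("Content-Type"))
}
```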

View File

@ -1,565 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"strings"
"testing"
)
type reqWriteTest struct {
Req Request
Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
// Any of these three may be empty to skip that test.
WantWrite string // Request.Write
WantProxy string // Request.WriteProxy
WantError error // wanted error from Request.Write
}
var reqWriteTests = []reqWriteTest{
// HTTP/1.1 => chunked coding; no body; no trailer
{
Req: Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.techcrunch.com",
Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
"Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
"Accept-Encoding": {"gzip,deflate"},
"Accept-Language": {"en-us,en;q=0.5"},
"Keep-Alive": {"300"},
"Proxy-Connection": {"keep-alive"},
"User-Agent": {"Fake"},
},
Body: nil,
Close: false,
Host: "www.techcrunch.com",
Form: map[string][]string{},
},
WantWrite: "GET / HTTP/1.1\r\n" +
"Host: www.techcrunch.com\r\n" +
"User-Agent: Fake\r\n" +
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
"Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
"Accept-Encoding: gzip,deflate\r\n" +
"Accept-Language: en-us,en;q=0.5\r\n" +
"Keep-Alive: 300\r\n" +
"Proxy-Connection: keep-alive\r\n\r\n",
WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
"Host: www.techcrunch.com\r\n" +
"User-Agent: Fake\r\n" +
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
"Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
"Accept-Encoding: gzip,deflate\r\n" +
"Accept-Language: en-us,en;q=0.5\r\n" +
"Keep-Alive: 300\r\n" +
"Proxy-Connection: keep-alive\r\n\r\n",
},
// HTTP/1.1 => chunked coding; body; empty trailer
{
Req: Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/search",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
TransferEncoding: []string{"chunked"},
},
Body: []byte("abcdef"),
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
},
// HTTP/1.1 POST => chunked coding; body; empty trailer
{
Req: Request{
Method: "POST",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/search",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: true,
TransferEncoding: []string{"chunked"},
},
Body: []byte("abcdef"),
WantWrite: "POST /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
},
// HTTP/1.1 POST with Content-Length, no chunking
{
Req: Request{
Method: "POST",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/search",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Close: true,
ContentLength: 6,
},
Body: []byte("abcdef"),
WantWrite: "POST /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Connection: close\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Connection: close\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
},
// HTTP/1.1 POST with Content-Length in headers
{
Req: Request{
Method: "POST",
URL: mustParseURL("http://example.com/"),
Host: "example.com",
Header: Header{
"Content-Length": []string{"10"}, // ignored
},
ContentLength: 6,
},
Body: []byte("abcdef"),
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
WantProxy: "POST http://example.com/ HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
},
// default to HTTP/1.1
{
Req: Request{
Method: "GET",
URL: mustParseURL("/search"),
Host: "www.google.com",
},
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"\r\n",
},
// Request with a 0 ContentLength and a 0 byte body.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 0, // as if unset by user
},
Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) },
// RFC 2616 Section 14.13 says Content-Length should be specified
// unless body is prohibited by the request method.
// Also, nginx expects it for POST and PUT.
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
WantProxy: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
},
// Request with a 0 ContentLength and a 1 byte body.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 0, // as if unset by user
},
Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) },
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("x") + chunk(""),
WantProxy: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("x") + chunk(""),
},
// Request with a ContentLength of 10 but a 5 byte body.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 10, // but we're going to send only 5 bytes
},
Body: []byte("12345"),
WantError: errors.New("http: Request.ContentLength=10 with Body length 5"),
},
// Request with a ContentLength of 4 but an 8 byte body.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 4, // but we're going to try to send 8 bytes
},
Body: []byte("12345678"),
WantError: errors.New("http: Request.ContentLength=4 with Body length 8"),
},
// Request with a 5 ContentLength and nil body.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 5, // but we'll omit the body
},
WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
},
// Request with a 0 ContentLength and a body with 1 byte content and an error.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 0, // as if unset by user
},
Body: func() io.ReadCloser {
err := errors.New("Custom reader error")
errReader := &errorReader{err}
return ioutil.NopCloser(io.MultiReader(strings.NewReader("x"), errReader))
},
WantError: errors.New("Custom reader error"),
},
// Request with a 0 ContentLength and a body without content and an error.
{
Req: Request{
Method: "POST",
URL: mustParseURL("/"),
Host: "example.com",
ProtoMajor: 1,
ProtoMinor: 1,
ContentLength: 0, // as if unset by user
},
Body: func() io.ReadCloser {
err := errors.New("Custom reader error")
errReader := &errorReader{err}
return ioutil.NopCloser(errReader)
},
WantError: errors.New("Custom reader error"),
},
// Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
// and doesn't add a User-Agent.
{
Req: Request{
Method: "GET",
URL: mustParseURL("/foo"),
ProtoMajor: 1,
ProtoMinor: 0,
Header: Header{
"X-Foo": []string{"X-Bar"},
},
},
WantWrite: "GET /foo HTTP/1.1\r\n" +
"Host: \r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"X-Foo: X-Bar\r\n\r\n",
},
// If no Request.Host and no Request.URL.Host, we send
// an empty Host header, and don't use
// Request.Header["Host"]. This is just testing that
// we don't change Go 1.0 behavior.
{
Req: Request{
Method: "GET",
Host: "",
URL: &url.URL{
Scheme: "http",
Host: "",
Path: "/search",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"Host": []string{"bad.example.com"},
},
},
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: \r\n" +
"User-Agent: Go 1.1 package http\r\n\r\n",
},
// Opaque test #1 from golang.org/issue/4860
{
Req: Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Opaque: "/%2F/%2F/",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
},
WantWrite: "GET /%2F/%2F/ HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n\r\n",
},
// Opaque test #2 from golang.org/issue/4860
{
Req: Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "x.google.com",
Opaque: "//y.google.com/%2F/%2F/",
},
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
},
WantWrite: "GET http://y.google.com/%2F/%2F/ HTTP/1.1\r\n" +
"Host: x.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n\r\n",
},
// Testing custom case in header keys. Issue 5022.
{
Req: Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
},
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{
"ALL-CAPS": {"x"},
},
},
WantWrite: "GET / HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"ALL-CAPS: x\r\n" +
"\r\n",
},
}
func TestRequestWrite(t *testing.T) {
for i := range reqWriteTests {
tt := &reqWriteTests[i]
setBody := func() {
if tt.Body == nil {
return
}
switch b := tt.Body.(type) {
case []byte:
tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
case func() io.ReadCloser:
tt.Req.Body = b()
}
}
setBody()
if tt.Req.Header == nil {
tt.Req.Header = make(Header)
}
var braw bytes.Buffer
err := tt.Req.Write(&braw)
if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
t.Errorf("writing #%d, err = %q, want %q", i, g, e)
continue
}
if err != nil {
continue
}
if tt.WantWrite != "" {
sraw := braw.String()
if sraw != tt.WantWrite {
t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
continue
}
}
if tt.WantProxy != "" {
setBody()
var praw bytes.Buffer
err = tt.Req.WriteProxy(&praw)
if err != nil {
t.Errorf("WriteProxy #%d: %s", i, err)
continue
}
sraw := praw.String()
if sraw != tt.WantProxy {
t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
continue
}
}
}
}
type closeChecker struct {
io.Reader
closed bool
}
func (rc *closeChecker) Close() error {
rc.closed = true
return nil
}
// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
// inside a NopCloser, and that it serializes it correctly.
func TestRequestWriteClosesBody(t *testing.T) {
rc := &closeChecker{Reader: strings.NewReader("my body")}
req, _ := NewRequest("POST", "http://foo.com/", rc)
if req.ContentLength != 0 {
t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
}
buf := new(bytes.Buffer)
req.Write(buf)
if !rc.closed {
t.Error("body not closed after write")
}
expected := "POST / HTTP/1.1\r\n" +
"Host: foo.com\r\n" +
"User-Agent: Go 1.1 package http\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
// TODO: currently we don't buffer before chunking, so we get a
// single "m" chunk before the other chunks, as this was the 1-byte
// read from our MultiReader where we stitched the Body back together
// after sniffing whether the Body was 0 bytes or not.
chunk("m") +
chunk("y body") +
chunk("")
if buf.String() != expected {
t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
}
}
func chunk(s string) string {
return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}
func mustParseURL(s string) *url.URL {
u, err := url.Parse(s)
if err != nil {
panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
}
return u
}
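As context for the chunk helper above (a minimal standalone sketch, not part of the vendored file): each chunk is written as a hex length, CRLF, the data, CRLF, and a zero-length chunk terminates the body.
```
package main

import "fmt"

// chunk mirrors the test helper above: hex length, CRLF, data, CRLF.
func chunk(s string) string {
	return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}

func main() {
	// A chunked body ends with a zero-length chunk.
	fmt.Printf("%q\n", chunk("abcdef")+chunk("")) // "6\r\nabcdef\r\n0\r\n\r\n"
}
```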


@ -1,645 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bufio"
"bytes"
"compress/gzip"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"net/url"
"reflect"
"regexp"
"strings"
"testing"
)
type respTest struct {
Raw string
Resp Response
Body string
}
func dummyReq(method string) *Request {
return &Request{Method: method}
}
func dummyReq11(method string) *Request {
return &Request{Method: method, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1}
}
var respTests = []respTest{
// Unchunked response without Content-Length.
{
"HTTP/1.0 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n" +
"Body here\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{
"Connection": {"close"}, // TODO(rsc): Delete?
},
Close: true,
ContentLength: -1,
},
"Body here\n",
},
// Unchunked HTTP/1.1 response without Content-Length or
// Connection headers.
{
"HTTP/1.1 200 OK\r\n" +
"\r\n" +
"Body here\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Request: dummyReq("GET"),
Close: true,
ContentLength: -1,
},
"Body here\n",
},
// Unchunked HTTP/1.1 204 response without Content-Length.
{
"HTTP/1.1 204 No Content\r\n" +
"\r\n" +
"Body should not be read!\n",
Response{
Status: "204 No Content",
StatusCode: 204,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: Header{},
Request: dummyReq("GET"),
Close: false,
ContentLength: 0,
},
"",
},
// Unchunked response with Content-Length.
{
"HTTP/1.0 200 OK\r\n" +
"Content-Length: 10\r\n" +
"Connection: close\r\n" +
"\r\n" +
"Body here\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{
"Connection": {"close"},
"Content-Length": {"10"},
},
Close: true,
ContentLength: 10,
},
"Body here\n",
},
// Chunked response without Content-Length.
{
"HTTP/1.1 200 OK\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"0a\r\n" +
"Body here\n\r\n" +
"09\r\n" +
"continued\r\n" +
"0\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{},
Close: false,
ContentLength: -1,
TransferEncoding: []string{"chunked"},
},
"Body here\ncontinued",
},
// Chunked response with Content-Length.
{
"HTTP/1.1 200 OK\r\n" +
"Transfer-Encoding: chunked\r\n" +
"Content-Length: 10\r\n" +
"\r\n" +
"0a\r\n" +
"Body here\n\r\n" +
"0\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{},
Close: false,
ContentLength: -1,
TransferEncoding: []string{"chunked"},
},
"Body here\n",
},
// Chunked response in response to a HEAD request
{
"HTTP/1.1 200 OK\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("HEAD"),
Header: Header{},
TransferEncoding: []string{"chunked"},
Close: false,
ContentLength: -1,
},
"",
},
// Content-Length in response to a HEAD request
{
"HTTP/1.0 200 OK\r\n" +
"Content-Length: 256\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("HEAD"),
Header: Header{"Content-Length": {"256"}},
TransferEncoding: nil,
Close: true,
ContentLength: 256,
},
"",
},
// Content-Length in response to a HEAD request with HTTP/1.1
{
"HTTP/1.1 200 OK\r\n" +
"Content-Length: 256\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("HEAD"),
Header: Header{"Content-Length": {"256"}},
TransferEncoding: nil,
Close: false,
ContentLength: 256,
},
"",
},
// No Content-Length or Chunked in response to a HEAD request
{
"HTTP/1.0 200 OK\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("HEAD"),
Header: Header{},
TransferEncoding: nil,
Close: true,
ContentLength: -1,
},
"",
},
// explicit Content-Length of 0.
{
"HTTP/1.1 200 OK\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{
"Content-Length": {"0"},
},
Close: false,
ContentLength: 0,
},
"",
},
// Status line without a Reason-Phrase, but trailing space.
// (permitted by RFC 2616)
{
"HTTP/1.0 303 \r\n\r\n",
Response{
Status: "303 ",
StatusCode: 303,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1,
},
"",
},
// Status line without a Reason-Phrase, and no trailing space.
// (not permitted by RFC 2616, but we'll accept it anyway)
{
"HTTP/1.0 303\r\n\r\n",
Response{
Status: "303 ",
StatusCode: 303,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1,
},
"",
},
// golang.org/issue/4767: don't special-case multipart/byteranges responses
{
`HTTP/1.1 206 Partial Content
Connection: close
Content-Type: multipart/byteranges; boundary=18a75608c8f47cef

some body`,
Response{
Status: "206 Partial Content",
StatusCode: 206,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{
"Content-Type": []string{"multipart/byteranges; boundary=18a75608c8f47cef"},
},
Close: true,
ContentLength: -1,
},
"some body",
},
// Unchunked response without Content-Length, Request is nil
{
"HTTP/1.0 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n" +
"Body here\n",
Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Header: Header{
"Connection": {"close"}, // TODO(rsc): Delete?
},
Close: true,
ContentLength: -1,
},
"Body here\n",
},
}
func TestReadResponse(t *testing.T) {
for i, tt := range respTests {
resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
rbody := resp.Body
resp.Body = nil
diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp)
var bout bytes.Buffer
if rbody != nil {
_, err = io.Copy(&bout, rbody)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
rbody.Close()
}
body := bout.String()
if body != tt.Body {
t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
}
}
}
func TestWriteResponse(t *testing.T) {
for i, tt := range respTests {
resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
err = resp.Write(ioutil.Discard)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
}
}
var readResponseCloseInMiddleTests = []struct {
chunked, compressed bool
}{
{false, false},
{true, false},
{true, true},
}
// TestReadResponseCloseInMiddle tests that closing a body after
// reading only part of its contents advances the read to the end of
// the request, right up until the next request.
func TestReadResponseCloseInMiddle(t *testing.T) {
for _, test := range readResponseCloseInMiddleTests {
fatalf := func(format string, args ...interface{}) {
args = append([]interface{}{test.chunked, test.compressed}, args...)
t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
}
checkErr := func(err error, msg string) {
if err == nil {
return
}
fatalf(msg+": %v", err)
}
var buf bytes.Buffer
buf.WriteString("HTTP/1.1 200 OK\r\n")
if test.chunked {
buf.WriteString("Transfer-Encoding: chunked\r\n")
} else {
buf.WriteString("Content-Length: 1000000\r\n")
}
var wr io.Writer = &buf
if test.chunked {
wr = newChunkedWriter(wr)
}
if test.compressed {
buf.WriteString("Content-Encoding: gzip\r\n")
wr = gzip.NewWriter(wr)
}
buf.WriteString("\r\n")
chunk := bytes.Repeat([]byte{'x'}, 1000)
for i := 0; i < 1000; i++ {
if test.compressed {
// Otherwise this compresses too well.
_, err := io.ReadFull(rand.Reader, chunk)
checkErr(err, "rand.Reader ReadFull")
}
wr.Write(chunk)
}
if test.compressed {
err := wr.(*gzip.Writer).Close()
checkErr(err, "compressor close")
}
if test.chunked {
buf.WriteString("0\r\n\r\n")
}
buf.WriteString("Next Request Here")
bufr := bufio.NewReader(&buf)
resp, err := ReadResponse(bufr, dummyReq("GET"))
checkErr(err, "ReadResponse")
expectedLength := int64(-1)
if !test.chunked {
expectedLength = 1000000
}
if resp.ContentLength != expectedLength {
fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength)
}
if resp.Body == nil {
fatalf("nil body")
}
if test.compressed {
gzReader, err := gzip.NewReader(resp.Body)
checkErr(err, "gzip.NewReader")
resp.Body = &readerAndCloser{gzReader, resp.Body}
}
rbuf := make([]byte, 2500)
n, err := io.ReadFull(resp.Body, rbuf)
checkErr(err, "2500 byte ReadFull")
if n != 2500 {
fatalf("ReadFull only read %d bytes", n)
}
if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) {
fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf))
}
resp.Body.Close()
rest, err := ioutil.ReadAll(bufr)
checkErr(err, "ReadAll on remainder")
if e, g := "Next Request Here", string(rest); e != g {
g = regexp.MustCompile(`(xx+)`).ReplaceAllStringFunc(g, func(match string) string {
return fmt.Sprintf("x(repeated x%d)", len(match))
})
fatalf("remainder = %q, expected %q", g, e)
}
}
}
func diff(t *testing.T, prefix string, have, want interface{}) {
hv := reflect.ValueOf(have).Elem()
wv := reflect.ValueOf(want).Elem()
if hv.Type() != wv.Type() {
t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
}
for i := 0; i < hv.NumField(); i++ {
hf := hv.Field(i).Interface()
wf := wv.Field(i).Interface()
if !reflect.DeepEqual(hf, wf) {
t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf)
}
}
}
type responseLocationTest struct {
location string // Response's Location header or ""
requrl string // Response.Request.URL or ""
want string
wantErr error
}
var responseLocationTests = []responseLocationTest{
{"/foo", "http://bar.com/baz", "http://bar.com/foo", nil},
{"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil},
{"", "http://bar.com/baz", "", ErrNoLocation},
}
func TestLocationResponse(t *testing.T) {
for i, tt := range responseLocationTests {
res := new(Response)
res.Header = make(Header)
res.Header.Set("Location", tt.location)
if tt.requrl != "" {
res.Request = &Request{}
var err error
res.Request.URL, err = url.Parse(tt.requrl)
if err != nil {
t.Fatalf("bad test URL %q: %v", tt.requrl, err)
}
}
got, err := res.Location()
if tt.wantErr != nil {
if err == nil {
t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
continue
}
if g, e := err.Error(), tt.wantErr.Error(); g != e {
t.Errorf("%d. err=%q; want %q", i, g, e)
continue
}
continue
}
if err != nil {
t.Errorf("%d. err=%q", i, err)
continue
}
if g, e := got.String(), tt.want; g != e {
t.Errorf("%d. Location=%q; want %q", i, g, e)
}
}
}
func TestResponseStatusStutter(t *testing.T) {
r := &Response{
Status: "123 some status",
StatusCode: 123,
ProtoMajor: 1,
ProtoMinor: 3,
}
var buf bytes.Buffer
r.Write(&buf)
if strings.Contains(buf.String(), "123 123") {
t.Errorf("stutter in status: %s", buf.String())
}
}
func TestResponseContentLengthShortBody(t *testing.T) {
const shortBody = "Short body, not 123 bytes."
br := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n" +
"Content-Length: 123\r\n" +
"\r\n" +
shortBody))
res, err := ReadResponse(br, &Request{Method: "GET"})
if err != nil {
t.Fatal(err)
}
if res.ContentLength != 123 {
t.Fatalf("Content-Length = %d; want 123", res.ContentLength)
}
var buf bytes.Buffer
n, err := io.Copy(&buf, res.Body)
if n != int64(len(shortBody)) {
t.Errorf("Copied %d bytes; want %d, len(%q)", n, len(shortBody), shortBody)
}
if buf.String() != shortBody {
t.Errorf("Read body %q; want %q", buf.String(), shortBody)
}
if err != io.ErrUnexpectedEOF {
t.Errorf("io.Copy error = %#v; want io.ErrUnexpectedEOF", err)
}
}
func TestReadResponseUnexpectedEOF(t *testing.T) {
br := bufio.NewReader(strings.NewReader("HTTP/1.1 301 Moved Permanently\r\n" +
"Location: http://example.com"))
_, err := ReadResponse(br, nil)
if err != io.ErrUnexpectedEOF {
t.Errorf("ReadResponse = %v; want io.ErrUnexpectedEOF", err)
}
}
func TestNeedsSniff(t *testing.T) {
// needsSniff returns true with an empty response.
r := &response{}
if got, want := r.needsSniff(), true; got != want {
t.Errorf("needsSniff = %t; want %t", got, want)
}
// needsSniff returns false when Content-Type = nil.
r.handlerHeader = Header{"Content-Type": nil}
if got, want := r.needsSniff(), false; got != want {
t.Errorf("needsSniff empty Content-Type = %t; want %t", got, want)
}
}
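As a usage note for the respTests table above (an illustrative sketch using the exported API, not part of the vendored file), the raw chunked responses it lists can be parsed directly with http.ReadResponse:
```
package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\n" +
		"Transfer-Encoding: chunked\r\n" +
		"\r\n" +
		"0a\r\nBody here\n\r\n" +
		"09\r\ncontinued\r\n" +
		"0\r\n\r\n"
	// ReadResponse parses the status line, headers and transfer encoding;
	// reading the Body transparently de-chunks it.
	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), &http.Request{Method: "GET"})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%q\n", body) // "Body here\ncontinued"
}
```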


@ -1,226 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"io/ioutil"
"strings"
"testing"
)
type respWriteTest struct {
Resp Response
Raw string
}
func TestResponseWrite(t *testing.T) {
respWriteTests := []respWriteTest{
// HTTP/1.0, identity coding; no trailer
{
Response{
StatusCode: 503,
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: 6,
},
"HTTP/1.0 503 Service Unavailable\r\n" +
"Content-Length: 6\r\n\r\n" +
"abcdef",
},
// Unchunked response without Content-Length.
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 0,
Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: -1,
},
"HTTP/1.0 200 OK\r\n" +
"\r\n" +
"abcdef",
},
// HTTP/1.1 response with unknown length and Connection: close
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: -1,
Close: true,
},
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n" +
"abcdef",
},
// HTTP/1.1 response with unknown length and not setting connection: close
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq11("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: -1,
Close: false,
},
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n" +
"abcdef",
},
// HTTP/1.1 response with unknown length and not setting connection: close, but
// setting chunked.
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq11("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: -1,
TransferEncoding: []string{"chunked"},
Close: false,
},
"HTTP/1.1 200 OK\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
"6\r\nabcdef\r\n0\r\n\r\n",
},
// HTTP/1.1 response 0 content-length, and nil body
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq11("GET"),
Header: Header{},
Body: nil,
ContentLength: 0,
Close: false,
},
"HTTP/1.1 200 OK\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
},
// HTTP/1.1 response 0 content-length, and non-nil empty body
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq11("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("")),
ContentLength: 0,
Close: false,
},
"HTTP/1.1 200 OK\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
},
// HTTP/1.1 response 0 content-length, and non-nil non-empty body
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq11("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("foo")),
ContentLength: 0,
Close: false,
},
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"\r\nfoo",
},
// HTTP/1.1, chunked coding; empty trailer; close
{
Response{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
ContentLength: 6,
TransferEncoding: []string{"chunked"},
Close: true,
},
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
"6\r\nabcdef\r\n0\r\n\r\n",
},
// Header value with a newline character (Issue 914).
// Also tests removal of leading and trailing whitespace.
{
Response{
StatusCode: 204,
ProtoMajor: 1,
ProtoMinor: 1,
Request: dummyReq("GET"),
Header: Header{
"Foo": []string{" Bar\nBaz "},
},
Body: nil,
ContentLength: 0,
TransferEncoding: []string{"chunked"},
Close: true,
},
"HTTP/1.1 204 No Content\r\n" +
"Connection: close\r\n" +
"Foo: Bar Baz\r\n" +
"\r\n",
},
// Want a single Content-Length header. Fixing issue 8180 where
// there were two.
{
Response{
StatusCode: StatusOK,
ProtoMajor: 1,
ProtoMinor: 1,
Request: &Request{Method: "POST"},
Header: Header{},
ContentLength: 0,
TransferEncoding: nil,
Body: nil,
},
"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n",
},
}
for i := range respWriteTests {
tt := &respWriteTests[i]
var braw bytes.Buffer
err := tt.Resp.Write(&braw)
if err != nil {
t.Errorf("error writing #%d: %s", i, err)
continue
}
sraw := braw.String()
if sraw != tt.Raw {
t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw)
continue
}
}
}
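For the write direction (a small sketch, not part of the vendored file), the same serialization exercised by respWriteTests is available through the exported Response.Write:
```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	resp := &http.Response{
		StatusCode:    200,
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{},
		Body:          ioutil.NopCloser(strings.NewReader("abcdef")),
		ContentLength: 6,
	}
	var buf bytes.Buffer
	if err := resp.Write(&buf); err != nil {
		panic(err)
	}
	// Roughly: "HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nabcdef"
	fmt.Printf("%q\n", buf.String())
}
```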

File diff suppressed because it is too large.


@ -1,171 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
. "net/http"
"net/http/httptest"
"reflect"
"strconv"
"strings"
"testing"
)
var sniffTests = []struct {
desc string
data []byte
contentType string
}{
// Some nonsense.
{"Empty", []byte{}, "text/plain; charset=utf-8"},
{"Binary", []byte{1, 2, 3}, "application/octet-stream"},
{"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
{"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
{"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
{"HTML document #4 (leading CRLF)", []byte("\r\n<html>..."), "text/html; charset=utf-8"},
{"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},
{"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},
// Image types.
{"GIF 87a", []byte(`GIF87a`), "image/gif"},
{"GIF 89a", []byte(`GIF89a...`), "image/gif"},
// TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
//{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
//{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"},
}
func TestDetectContentType(t *testing.T) {
for _, tt := range sniffTests {
ct := DetectContentType(tt.data)
if ct != tt.contentType {
t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
}
}
}
func TestServerContentType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
i, _ := strconv.Atoi(r.FormValue("i"))
tt := sniffTests[i]
n, err := w.Write(tt.data)
if n != len(tt.data) || err != nil {
log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
}
}))
defer ts.Close()
for i, tt := range sniffTests {
resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
if err != nil {
t.Errorf("%v: %v", tt.desc, err)
continue
}
if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("%v: reading body: %v", tt.desc, err)
} else if !bytes.Equal(data, tt.data) {
t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
}
resp.Body.Close()
}
}
// Issue 5953: shouldn't sniff if the handler set a Content-Type header,
// even if it's the empty string.
func TestServerIssue5953(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header()["Content-Type"] = []string{""}
fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
got := resp.Header["Content-Type"]
want := []string{""}
if !reflect.DeepEqual(got, want) {
t.Errorf("Content-Type = %q; want %q", got, want)
}
resp.Body.Close()
}
func TestContentTypeWithCopy(t *testing.T) {
defer afterTest(t)
const (
input = "\n<html>\n\t<head>\n"
expected = "text/html; charset=utf-8"
)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
// Use io.Copy from a bytes.Buffer to trigger ReadFrom.
buf := bytes.NewBuffer([]byte(input))
n, err := io.Copy(w, buf)
if int(n) != len(input) || err != nil {
t.Errorf("io.Copy(w, %q) = %v, %v want %d, nil", input, n, err, len(input))
}
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
if ct := resp.Header.Get("Content-Type"); ct != expected {
t.Errorf("Content-Type = %q, want %q", ct, expected)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("reading body: %v", err)
} else if !bytes.Equal(data, []byte(input)) {
t.Errorf("data is %q, want %q", data, input)
}
resp.Body.Close()
}
func TestSniffWriteSize(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
size, _ := strconv.Atoi(r.FormValue("size"))
written, err := io.WriteString(w, strings.Repeat("a", size))
if err != nil {
t.Errorf("write of %d bytes: %v", size, err)
return
}
if written != size {
t.Errorf("write of %d bytes wrote %d bytes", size, written)
}
}))
defer ts.Close()
for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} {
res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size))
if err != nil {
t.Fatalf("size %d: %v", size, err)
}
if _, err := io.Copy(ioutil.Discard, res.Body); err != nil {
t.Fatalf("size %d: io.Copy of body = %v", size, err)
}
if err := res.Body.Close(); err != nil {
t.Fatalf("size %d: body Close = %v", size, err)
}
}
}
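For reference (a sketch, not part of the vendored file), the sniffing behaviour the table above pins down is exposed as http.DetectContentType, which looks at no more than the first 512 bytes of data:
```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Expected results follow the sniffTests table above.
	fmt.Println(http.DetectContentType([]byte{}))                // text/plain; charset=utf-8
	fmt.Println(http.DetectContentType([]byte{1, 2, 3}))         // application/octet-stream
	fmt.Println(http.DetectContentType([]byte("<HTML></HTML>"))) // text/html; charset=utf-8
	fmt.Println(http.DetectContentType([]byte("GIF89a...")))     // image/gif
}
```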


@ -1,64 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bufio"
"io"
"strings"
"testing"
)
func TestBodyReadBadTrailer(t *testing.T) {
b := &body{
src: strings.NewReader("foobar"),
hdr: true, // force reading the trailer
r: bufio.NewReader(strings.NewReader("")),
}
buf := make([]byte, 7)
n, err := b.Read(buf[:3])
got := string(buf[:n])
if got != "foo" || err != nil {
t.Fatalf(`first Read = %d (%q), %v; want 3 ("foo")`, n, got, err)
}
n, err = b.Read(buf[:])
got = string(buf[:n])
if got != "bar" || err != nil {
t.Fatalf(`second Read = %d (%q), %v; want 3 ("bar")`, n, got, err)
}
n, err = b.Read(buf[:])
got = string(buf[:n])
if err == nil {
t.Errorf("final Read was successful (%q), expected error from trailer read", got)
}
}
func TestFinalChunkedBodyReadEOF(t *testing.T) {
res, err := ReadResponse(bufio.NewReader(strings.NewReader(
"HTTP/1.1 200 OK\r\n"+
"Transfer-Encoding: chunked\r\n"+
"\r\n"+
"0a\r\n"+
"Body here\n\r\n"+
"09\r\n"+
"continued\r\n"+
"0\r\n"+
"\r\n")), nil)
if err != nil {
t.Fatal(err)
}
want := "Body here\ncontinued"
buf := make([]byte, len(want))
n, err := res.Body.Read(buf)
if n != len(want) || err != io.EOF {
t.Logf("body = %#v", res.Body)
t.Errorf("Read = %v, %v; want %d, EOF", n, err, len(want))
}
if string(buf) != want {
t.Errorf("buf = %q; want %q", buf, want)
}
}

File diff suppressed because it is too large.


@ -1,97 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"net/http"
"runtime"
"sort"
"strings"
"testing"
"time"
)
func interestingGoroutines() (gs []string) {
buf := make([]byte, 2<<20)
buf = buf[:runtime.Stack(buf, true)]
for _, g := range strings.Split(string(buf), "\n\n") {
sl := strings.SplitN(g, "\n", 2)
if len(sl) != 2 {
continue
}
stack := strings.TrimSpace(sl[1])
if stack == "" ||
strings.Contains(stack, "created by net.startServer") ||
strings.Contains(stack, "created by testing.RunTests") ||
strings.Contains(stack, "closeWriteAndWait") ||
strings.Contains(stack, "testing.Main(") ||
// These only show up with GOTRACEBACK=2; Issue 5005 (comment 28)
strings.Contains(stack, "runtime.goexit") ||
strings.Contains(stack, "created by runtime.gc") ||
strings.Contains(stack, "runtime.MHeap_Scavenger") {
continue
}
gs = append(gs, stack)
}
sort.Strings(gs)
return
}
// Verify the other tests didn't leave any goroutines running.
// This is in a file named z_last_test.go so it sorts at the end.
func TestGoroutinesRunning(t *testing.T) {
if testing.Short() {
t.Skip("not counting goroutines for leakage in -short mode")
}
gs := interestingGoroutines()
n := 0
stackCount := make(map[string]int)
for _, g := range gs {
stackCount[g]++
n++
}
t.Logf("num goroutines = %d", n)
if n > 0 {
t.Error("Too many goroutines.")
for stack, count := range stackCount {
t.Logf("%d instances of:\n%s", count, stack)
}
}
}
func afterTest(t *testing.T) {
http.DefaultTransport.(*http.Transport).CloseIdleConnections()
if testing.Short() {
return
}
var bad string
badSubstring := map[string]string{
").readLoop(": "a Transport",
").writeLoop(": "a Transport",
"created by net/http/httptest.(*Server).Start": "an httptest.Server",
"timeoutHandler": "a TimeoutHandler",
"net.(*netFD).connect(": "a timing out dial",
").noteClientGone(": "a closenotifier sender",
}
var stacks string
for i := 0; i < 4; i++ {
bad = ""
stacks = strings.Join(interestingGoroutines(), "\n\n")
for substr, what := range badSubstring {
if strings.Contains(stacks, substr) {
bad = what
}
}
if bad == "" {
return
}
// Bad stuff found, but goroutines might just still be
// shutting down, so give it some time.
time.Sleep(250 * time.Millisecond)
}
t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks)
}


@ -1,106 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"testing"
)
func TestRoundUp(t *testing.T) {
if roundUp(0, 16) != 0 ||
roundUp(1, 16) != 16 ||
roundUp(15, 16) != 16 ||
roundUp(16, 16) != 16 ||
roundUp(17, 16) != 32 {
t.Error("roundUp broken")
}
}
var paddingTests = []struct {
in []byte
good bool
expectedLen int
}{
{[]byte{1, 2, 3, 4, 0}, true, 4},
{[]byte{1, 2, 3, 4, 0, 1}, false, 0},
{[]byte{1, 2, 3, 4, 99, 99}, false, 0},
{[]byte{1, 2, 3, 4, 1, 1}, true, 4},
{[]byte{1, 2, 3, 2, 2, 2}, true, 3},
{[]byte{1, 2, 3, 3, 3, 3}, true, 2},
{[]byte{1, 2, 3, 4, 3, 3}, false, 0},
{[]byte{1, 4, 4, 4, 4, 4}, true, 1},
{[]byte{5, 5, 5, 5, 5, 5}, true, 0},
{[]byte{6, 6, 6, 6, 6, 6}, false, 0},
}
func TestRemovePadding(t *testing.T) {
for i, test := range paddingTests {
payload, good := removePadding(test.in)
expectedGood := byte(255)
if !test.good {
expectedGood = 0
}
if good != expectedGood {
t.Errorf("#%d: wrong validity, want:%d got:%d", i, expectedGood, good)
}
if good == 255 && len(payload) != test.expectedLen {
t.Errorf("#%d: got %d, want %d", i, len(payload), test.expectedLen)
}
}
}
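The paddingTests table encodes the TLS CBC padding rule: the final byte gives the padding length, and that many additional trailing bytes must all carry the same value. A rough, non-constant-time sketch of the check (the vendored removePadding performs the same test in constant time to avoid timing side channels):
```
package main

import "fmt"

// checkPadding returns the payload length and whether the CBC padding is
// well formed. Unlike removePadding above, this version is NOT constant
// time and is for illustration only.
func checkPadding(b []byte) (payloadLen int, ok bool) {
	if len(b) == 0 {
		return 0, false
	}
	padLen := int(b[len(b)-1]) + 1 // padding bytes plus the length byte itself
	if padLen > len(b) {
		return 0, false
	}
	for _, v := range b[len(b)-padLen:] {
		if int(v) != padLen-1 {
			return 0, false
		}
	}
	return len(b) - padLen, true
}

func main() {
	fmt.Println(checkPadding([]byte{1, 2, 3, 4, 1, 1})) // 4 true
	fmt.Println(checkPadding([]byte{6, 6, 6, 6, 6, 6})) // 0 false
}
```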
var certExampleCom = `308201403081eda003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313138353835325a170d3132303933303138353835325a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31a301830160603551d11040f300d820b6578616d706c652e636f6d300b06092a864886f70d0101050341001a0b419d2c74474c6450654e5f10b32bf426ffdf55cad1c52602e7a9151513a3424c70f5960dcd682db0c33769cc1daa3fcdd3db10809d2392ed4a1bf50ced18`
var certWildcardExampleCom = `308201423081efa003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303034365a170d3132303933303139303034365a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31c301a30180603551d110411300f820d2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001676f0c9e7c33c1b656ed5a6476c4e2ee9ec8e62df7407accb1875272b2edd0a22096cb2c22598d11604104d604f810eb4b5987ca6bb319c7e6ce48725c54059`
var certFooExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303131345a170d3132303933303139303131345a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f666f6f2e6578616d706c652e636f6d300b06092a864886f70d010105034100646a2a51f2aa2477add854b462cf5207ba16d3213ffb5d3d0eed473fbf09935019192d1d5b8ca6a2407b424cf04d97c4cd9197c83ecf81f0eab9464a1109d09f`
var certDoubleWildcardExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303134315a170d3132303933303139303134315a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f2a2e2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001c3de267975f56ef57771c6218ef95ecc65102e57bd1defe6f7efea90d9b26cf40de5bd7ad75e46201c7f2a92aaa3e907451e9409f65e28ddb6db80d726290f6`
func TestCertificateSelection(t *testing.T) {
config := Config{
Certificates: []Certificate{
{
Certificate: [][]byte{fromHex(certExampleCom)},
},
{
Certificate: [][]byte{fromHex(certWildcardExampleCom)},
},
{
Certificate: [][]byte{fromHex(certFooExampleCom)},
},
{
Certificate: [][]byte{fromHex(certDoubleWildcardExampleCom)},
},
},
}
config.BuildNameToCertificate()
pointerToIndex := func(c *Certificate) int {
for i := range config.Certificates {
if c == &config.Certificates[i] {
return i
}
}
return -1
}
if n := pointerToIndex(config.getCertificateForName("example.com")); n != 0 {
t.Errorf("example.com returned certificate %d, not 0", n)
}
if n := pointerToIndex(config.getCertificateForName("bar.example.com")); n != 1 {
t.Errorf("bar.example.com returned certificate %d, not 1", n)
}
if n := pointerToIndex(config.getCertificateForName("foo.example.com")); n != 2 {
t.Errorf("foo.example.com returned certificate %d, not 2", n)
}
if n := pointerToIndex(config.getCertificateForName("foo.bar.example.com")); n != 3 {
t.Errorf("foo.bar.example.com returned certificate %d, not 3", n)
}
if n := pointerToIndex(config.getCertificateForName("foo.bar.baz.example.com")); n != 0 {
t.Errorf("foo.bar.baz.example.com returned certificate %d, not 0", n)
}
}
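From outside the package (a sketch against the public crypto/tls API, not part of the vendored file), the same name-based selection is driven by the ClientHello's SNI value once the certificates have been indexed:
```
package example

import "crypto/tls"

// newSNIConfig indexes certificates by the DNS names in their leaf
// certificates, which is the table getCertificateForName consults above.
func newSNIConfig(certs ...tls.Certificate) *tls.Config {
	cfg := &tls.Config{Certificates: certs}
	cfg.BuildNameToCertificate() // fills cfg.NameToCertificate
	return cfg
}
```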

File diff suppressed because it is too large.


@ -1,246 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"math/rand"
"reflect"
"testing"
"testing/quick"
)
var tests = []interface{}{
&clientHelloMsg{},
&serverHelloMsg{},
&finishedMsg{},
&certificateMsg{},
&certificateRequestMsg{},
&certificateVerifyMsg{},
&certificateStatusMsg{},
&clientKeyExchangeMsg{},
&nextProtoMsg{},
&newSessionTicketMsg{},
&sessionState{},
}
type testMessage interface {
marshal() []byte
unmarshal([]byte) bool
equal(interface{}) bool
}
func TestMarshalUnmarshal(t *testing.T) {
rand := rand.New(rand.NewSource(0))
for i, iface := range tests {
ty := reflect.ValueOf(iface).Type()
n := 100
if testing.Short() {
n = 5
}
for j := 0; j < n; j++ {
v, ok := quick.Value(ty, rand)
if !ok {
t.Errorf("#%d: failed to create value", i)
break
}
m1 := v.Interface().(testMessage)
marshaled := m1.marshal()
m2 := iface.(testMessage)
if !m2.unmarshal(marshaled) {
t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled)
break
}
m2.marshal() // to fill any marshal cache in the message
if !m1.equal(m2) {
t.Errorf("#%d got:%#v want:%#v %x", i, m2, m1, marshaled)
break
}
if i >= 3 {
// The first three message types (ClientHello,
// ServerHello and Finished) are allowed to
// have parsable prefixes because the extension
// data is optional and the length of the
// Finished varies across versions.
for j := 0; j < len(marshaled); j++ {
if m2.unmarshal(marshaled[0:j]) {
t.Errorf("#%d unmarshaled a prefix of length %d of %#v", i, j, m1)
break
}
}
}
}
}
}
func TestFuzz(t *testing.T) {
rand := rand.New(rand.NewSource(0))
for _, iface := range tests {
m := iface.(testMessage)
for j := 0; j < 1000; j++ {
len := rand.Intn(100)
bytes := randomBytes(len, rand)
// This just looks for crashes due to bounds errors etc.
m.unmarshal(bytes)
}
}
}
func randomBytes(n int, rand *rand.Rand) []byte {
r := make([]byte, n)
for i := 0; i < n; i++ {
r[i] = byte(rand.Int31())
}
return r
}
func randomString(n int, rand *rand.Rand) string {
b := randomBytes(n, rand)
return string(b)
}
func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &clientHelloMsg{}
m.vers = uint16(rand.Intn(65536))
m.random = randomBytes(32, rand)
m.sessionId = randomBytes(rand.Intn(32), rand)
m.cipherSuites = make([]uint16, rand.Intn(63)+1)
for i := 0; i < len(m.cipherSuites); i++ {
m.cipherSuites[i] = uint16(rand.Int31())
}
m.compressionMethods = randomBytes(rand.Intn(63)+1, rand)
if rand.Intn(10) > 5 {
m.nextProtoNeg = true
}
if rand.Intn(10) > 5 {
m.serverName = randomString(rand.Intn(255), rand)
}
m.ocspStapling = rand.Intn(10) > 5
m.supportedPoints = randomBytes(rand.Intn(5)+1, rand)
m.supportedCurves = make([]uint16, rand.Intn(5)+1)
for i := range m.supportedCurves {
m.supportedCurves[i] = uint16(rand.Intn(30000))
}
if rand.Intn(10) > 5 {
m.ticketSupported = true
if rand.Intn(10) > 5 {
m.sessionTicket = randomBytes(rand.Intn(300), rand)
}
}
if rand.Intn(10) > 5 {
m.signatureAndHashes = supportedSKXSignatureAlgorithms
}
return reflect.ValueOf(m)
}
func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &serverHelloMsg{}
m.vers = uint16(rand.Intn(65536))
m.random = randomBytes(32, rand)
m.sessionId = randomBytes(rand.Intn(32), rand)
m.cipherSuite = uint16(rand.Int31())
m.compressionMethod = uint8(rand.Intn(256))
if rand.Intn(10) > 5 {
m.nextProtoNeg = true
n := rand.Intn(10)
m.nextProtos = make([]string, n)
for i := 0; i < n; i++ {
m.nextProtos[i] = randomString(20, rand)
}
}
if rand.Intn(10) > 5 {
m.ocspStapling = true
}
if rand.Intn(10) > 5 {
m.ticketSupported = true
}
return reflect.ValueOf(m)
}
func (*certificateMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &certificateMsg{}
numCerts := rand.Intn(20)
m.certificates = make([][]byte, numCerts)
for i := 0; i < numCerts; i++ {
m.certificates[i] = randomBytes(rand.Intn(10)+1, rand)
}
return reflect.ValueOf(m)
}
func (*certificateRequestMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &certificateRequestMsg{}
m.certificateTypes = randomBytes(rand.Intn(5)+1, rand)
numCAs := rand.Intn(100)
m.certificateAuthorities = make([][]byte, numCAs)
for i := 0; i < numCAs; i++ {
m.certificateAuthorities[i] = randomBytes(rand.Intn(15)+1, rand)
}
return reflect.ValueOf(m)
}
func (*certificateVerifyMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &certificateVerifyMsg{}
m.signature = randomBytes(rand.Intn(15)+1, rand)
return reflect.ValueOf(m)
}
func (*certificateStatusMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &certificateStatusMsg{}
if rand.Intn(10) > 5 {
m.statusType = statusTypeOCSP
m.response = randomBytes(rand.Intn(10)+1, rand)
} else {
m.statusType = 42
}
return reflect.ValueOf(m)
}
func (*clientKeyExchangeMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &clientKeyExchangeMsg{}
m.ciphertext = randomBytes(rand.Intn(1000)+1, rand)
return reflect.ValueOf(m)
}
func (*finishedMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &finishedMsg{}
m.verifyData = randomBytes(12, rand)
return reflect.ValueOf(m)
}
func (*nextProtoMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &nextProtoMsg{}
m.proto = randomString(rand.Intn(255), rand)
return reflect.ValueOf(m)
}
func (*newSessionTicketMsg) Generate(rand *rand.Rand, size int) reflect.Value {
m := &newSessionTicketMsg{}
m.ticket = randomBytes(rand.Intn(4), rand)
return reflect.ValueOf(m)
}
func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value {
s := &sessionState{}
s.vers = uint16(rand.Intn(10000))
s.cipherSuite = uint16(rand.Intn(10000))
s.masterSecret = randomBytes(rand.Intn(100), rand)
numCerts := rand.Intn(20)
s.certificates = make([][]byte, numCerts)
for i := 0; i < numCerts; i++ {
s.certificates[i] = randomBytes(rand.Intn(10)+1, rand)
}
return reflect.ValueOf(s)
}

File diff suppressed because it is too large.


@ -1,126 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"encoding/hex"
"testing"
)
type testSplitPreMasterSecretTest struct {
in, out1, out2 string
}
var testSplitPreMasterSecretTests = []testSplitPreMasterSecretTest{
{"", "", ""},
{"00", "00", "00"},
{"0011", "00", "11"},
{"001122", "0011", "1122"},
{"00112233", "0011", "2233"},
}
func TestSplitPreMasterSecret(t *testing.T) {
for i, test := range testSplitPreMasterSecretTests {
in, _ := hex.DecodeString(test.in)
out1, out2 := splitPreMasterSecret(in)
s1 := hex.EncodeToString(out1)
s2 := hex.EncodeToString(out2)
if s1 != test.out1 || s2 != test.out2 {
t.Errorf("#%d: got: (%s, %s) want: (%s, %s)", i, s1, s2, test.out1, test.out2)
}
}
}
type testKeysFromTest struct {
version uint16
preMasterSecret string
clientRandom, serverRandom string
masterSecret string
clientMAC, serverMAC string
clientKey, serverKey string
macLen, keyLen int
}
func TestKeysFromPreMasterSecret(t *testing.T) {
for i, test := range testKeysFromTests {
in, _ := hex.DecodeString(test.preMasterSecret)
clientRandom, _ := hex.DecodeString(test.clientRandom)
serverRandom, _ := hex.DecodeString(test.serverRandom)
masterSecret := masterFromPreMasterSecret(test.version, in, clientRandom, serverRandom)
if s := hex.EncodeToString(masterSecret); s != test.masterSecret {
t.Errorf("#%d: bad master secret %s, want %s", i, s, test.masterSecret)
continue
}
clientMAC, serverMAC, clientKey, serverKey, _, _ := keysFromMasterSecret(test.version, masterSecret, clientRandom, serverRandom, test.macLen, test.keyLen, 0)
clientMACString := hex.EncodeToString(clientMAC)
serverMACString := hex.EncodeToString(serverMAC)
clientKeyString := hex.EncodeToString(clientKey)
serverKeyString := hex.EncodeToString(serverKey)
if clientMACString != test.clientMAC ||
serverMACString != test.serverMAC ||
clientKeyString != test.clientKey ||
serverKeyString != test.serverKey {
t.Errorf("#%d: got: (%s, %s, %s, %s) want: (%s, %s, %s, %s)", i, clientMACString, serverMACString, clientKeyString, serverKeyString, test.clientMAC, test.serverMAC, test.clientKey, test.serverKey)
}
}
}
// These test vectors were generated from GnuTLS using `gnutls-cli --insecure -d 9`
var testKeysFromTests = []testKeysFromTest{
{
VersionTLS10,
"0302cac83ad4b1db3b9ab49ad05957de2a504a634a386fc600889321e1a971f57479466830ac3e6f468e87f5385fa0c5",
"4ae66303755184a3917fcb44880605fcc53baa01912b22ed94473fc69cebd558",
"4ae663020ec16e6bb5130be918cfcafd4d765979a3136a5d50c593446e4e44db",
"3d851bab6e5556e959a16bc36d66cfae32f672bfa9ecdef6096cbb1b23472df1da63dbbd9827606413221d149ed08ceb",
"805aaa19b3d2c0a0759a4b6c9959890e08480119",
"2d22f9fe519c075c16448305ceee209fc24ad109",
"d50b5771244f850cd8117a9ccafe2cf1",
"e076e33206b30507a85c32855acd0919",
20,
16,
},
{
VersionTLS10,
"03023f7527316bc12cbcd69e4b9e8275d62c028f27e65c745cfcddc7ce01bd3570a111378b63848127f1c36e5f9e4890",
"4ae66364b5ea56b20ce4e25555aed2d7e67f42788dd03f3fee4adae0459ab106",
"4ae66363ab815cbf6a248b87d6b556184e945e9b97fbdf247858b0bdafacfa1c",
"7d64be7c80c59b740200b4b9c26d0baaa1c5ae56705acbcf2307fe62beb4728c19392c83f20483801cce022c77645460",
"97742ed60a0554ca13f04f97ee193177b971e3b0",
"37068751700400e03a8477a5c7eec0813ab9e0dc",
"207cddbc600d2a200abac6502053ee5c",
"df3f94f6e1eacc753b815fe16055cd43",
20,
16,
},
{
VersionTLS10,
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1",
"4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e",
"4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e",
"1aff2e7a2c4279d0126f57a65a77a8d9d0087cf2733366699bec27eb53d5740705a8574bb1acc2abbe90e44f0dd28d6c",
"3c7647c93c1379a31a609542aa44e7f117a70085",
"0d73102994be74a575a3ead8532590ca32a526d4",
"ac7581b0b6c10d85bbd905ffbf36c65e",
"ff07edde49682b45466bd2e39464b306",
20,
16,
},
{
VersionSSL30,
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1",
"4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e",
"4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e",
"a614863e56299dcffeea2938f22c2ba023768dbe4b3f6877bc9c346c6ae529b51d9cb87ff9695ea4d01f2205584405b2",
"2c450d5b6f6e2013ac6bea6a0b32200d4e1ffb94",
"7a7a7438769536f2fb1ae49a61f0703b79b2dc53",
"f8f6b26c10f12855c9aafb1e0e839ccf",
"2b9d4b4a60cb7f396780ebff50650419",
20,
16,
},
}


@ -1,107 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"testing"
)
var rsaCertPEM = `-----BEGIN CERTIFICATE-----
MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ
hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa
rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv
zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW
r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V
-----END CERTIFICATE-----
`
var rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
-----END RSA PRIVATE KEY-----
`
// keyPEM is the same as rsaKeyPEM, but declares itself as just
// "PRIVATE KEY", not "RSA PRIVATE KEY". http://golang.org/issue/4477
var keyPEM = `-----BEGIN PRIVATE KEY-----
MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
-----END PRIVATE KEY-----
`
var ecdsaCertPEM = `-----BEGIN CERTIFICATE-----
MIIB/jCCAWICCQDscdUxw16XFDAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw
EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0
eSBMdGQwHhcNMTIxMTE0MTI0MDQ4WhcNMTUxMTE0MTI0MDQ4WjBFMQswCQYDVQQG
EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk
Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBY9+my9OoeSUR
lDQdV/x8LsOuLilthhiS1Tz4aGDHIPwC1mlvnf7fg5lecYpMCrLLhauAc1UJXcgl
01xoLuzgtAEAgv2P/jgytzRSpUYvgLBt1UA0leLYBy6mQQbrNEuqT3INapKIcUv8
XxYP0xMEUksLPq6Ca+CRSqTtrd/23uTnapkwCQYHKoZIzj0EAQOBigAwgYYCQXJo
A7Sl2nLVf+4Iu/tAX/IF4MavARKC4PPHK3zfuGfPR3oCCcsAoz3kAzOeijvd0iXb
H5jBImIxPL4WxQNiBTexAkF8D1EtpYuWdlVQ80/h/f4pBcGiXPqX5h2PQSQY7hP1
+jwM1FGS4fREIOvlBYr/SzzQRtwrvrzGYxDEDbsC0ZGRnA==
-----END CERTIFICATE-----
`
var ecdsaKeyPEM = `-----BEGIN EC PARAMETERS-----
BgUrgQQAIw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MIHcAgEBBEIBrsoKp0oqcv6/JovJJDoDVSGWdirrkgCWxrprGlzB9o0X8fV675X0
NwuBenXFfeZvVcwluO7/Q9wkYoPd/t3jGImgBwYFK4EEACOhgYkDgYYABAFj36bL
06h5JRGUNB1X/Hwuw64uKW2GGJLVPPhoYMcg/ALWaW+d/t+DmV5xikwKssuFq4Bz
VQldyCXTXGgu7OC0AQCC/Y/+ODK3NFKlRi+AsG3VQDSV4tgHLqZBBus0S6pPcg1q
kohxS/xfFg/TEwRSSws+roJr4JFKpO2t3/be5OdqmQ==
-----END EC PRIVATE KEY-----
`
var keyPairTests = []struct {
algo string
cert string
key string
}{
{"ECDSA", ecdsaCertPEM, ecdsaKeyPEM},
{"RSA", rsaCertPEM, rsaKeyPEM},
{"RSA-untyped", rsaCertPEM, keyPEM}, // golang.org/issue/4477
}
func TestX509KeyPair(t *testing.T) {
var pem []byte
for _, test := range keyPairTests {
pem = []byte(test.cert + test.key)
if _, err := X509KeyPair(pem, pem); err != nil {
t.Errorf("Failed to load %s cert followed by %s key: %s", test.algo, test.algo, err)
}
pem = []byte(test.key + test.cert)
if _, err := X509KeyPair(pem, pem); err != nil {
t.Errorf("Failed to load %s key followed by %s cert: %s", test.algo, test.algo, err)
}
}
}
func TestX509MixedKeyPair(t *testing.T) {
if _, err := X509KeyPair([]byte(rsaCertPEM), []byte(ecdsaKeyPEM)); err == nil {
t.Error("Load of RSA certificate succeeded with ECDSA private key")
}
if _, err := X509KeyPair([]byte(ecdsaCertPEM), []byte(rsaKeyPEM)); err == nil {
t.Error("Load of ECDSA certificate succeeded with RSA private key")
}
}
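As a usage note (a sketch, not part of the vendored file), the PEM literals above are the kind of input the exported tls.X509KeyPair expects; per golang.org/issue/4477, the key block may declare itself as either "RSA PRIVATE KEY" or just "PRIVATE KEY".
```
package example

import "crypto/tls"

// buildConfig loads one PEM-encoded certificate/key pair (for example the
// rsaCertPEM and rsaKeyPEM literals above) into a tls.Config.
func buildConfig(certPEM, keyPEM []byte) (*tls.Config, error) {
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, err
	}
	return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
}
```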


@ -1,130 +0,0 @@
// Package stscreds are credential Providers to retrieve STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
credentials.Expiry
// STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
RoleARN string
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// Optional ExternalID to pass along, defaults to nil if not set.
ExternalID *string
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
})
if err != nil {
return credentials.Value{}, err
}
// We will proactively generate new credentials before they expire.
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
return credentials.Value{
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
SessionToken: *roleOutput.Credentials.SessionToken,
}, nil
}
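Below is a minimal usage sketch for this provider, assuming the SDK's session package is used to satisfy client.ConfigProvider; the role ARN, durations, and option values are illustrative placeholders, not taken from this diff.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// session.New() satisfies client.ConfigProvider, so NewCredentials can
	// construct its own STS client from it.
	sess := session.New()

	// The role ARN is a placeholder. Functional options can tweak the
	// AssumeRoleProvider before it is wrapped, e.g. Duration or ExpiryWindow.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
			p.ExpiryWindow = 10 * time.Second
		})

	// Get() triggers Retrieve() lazily and caches the credentials until
	// they are within ExpiryWindow of expiring.
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access key id:", v.AccessKeyID)
}
```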


@ -1,2 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)
Jessie Frazelle <jess@docker.com> (@jfrazelle)


@ -1,31 +0,0 @@
package units
import (
"fmt"
"time"
)
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%f years", d.Hours()/24/365)
}
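As a quick illustration of the bucketing above, a sketch that calls HumanDuration for a few inputs; the import path is an assumption (this file is vendored from Docker's units package) and the expected outputs follow directly from the branches shown.

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/units" // import path is an assumption
)

func main() {
	fmt.Println(units.HumanDuration(500 * time.Millisecond)) // "Less than a second"
	fmt.Println(units.HumanDuration(90 * time.Second))       // "About a minute" (int(d.Minutes()) == 1)
	fmt.Println(units.HumanDuration(72 * time.Hour))         // "3 days"
	fmt.Println(units.HumanDuration(24 * 40 * time.Hour))    // "5 weeks"
}
```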


@ -1,91 +0,0 @@
package units
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// See: http://en.wikipedia.org/wiki/Binary_prefix
const (
// Decimal
KB = 1000
MB = 1000 * KB
GB = 1000 * MB
TB = 1000 * GB
PB = 1000 * TB
// Binary
KiB = 1024
MiB = 1024 * KiB
GiB = 1024 * MiB
TiB = 1024 * GiB
PiB = 1024 * TiB
)
type unitMap map[string]int64
var (
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
)
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size float64) string {
return intToString(float64(size), 1000.0, decimapAbbrs)
}
// BytesSize returns a human-readable approximation of a size
// using binary units (eg. "44 KiB", "17 MiB")
func BytesSize(size float64) string {
return intToString(size, 1024.0, binaryAbbrs)
}
func intToString(size, unit float64, _map []string) string {
i := 0
for size >= unit {
size = size / unit
i++
}
return fmt.Sprintf("%.4g %s", size, _map[i])
}
// FromHumanSize returns an integer from a human-readable specification of a
// size using SI standard (eg. "44kB", "17MB")
func FromHumanSize(size string) (int64, error) {
return parseSize(size, decimalMap)
}
// RAMInBytes parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
// returns the number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (int64, error) {
return parseSize(size, binaryMap)
}
// Parses the human-readable size string into the amount it represents
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
matches := sizeRegex.FindStringSubmatch(sizeStr)
if len(matches) != 3 {
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
size, err := strconv.ParseInt(matches[1], 10, 0)
if err != nil {
return -1, err
}
unitPrefix := strings.ToLower(matches[2])
if mul, ok := uMap[unitPrefix]; ok {
size *= mul
}
return size, nil
}
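A companion sketch for the size helpers, under the same assumed import path; the expected results are worked out from the constants and the %.4g format above.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/units" // import path is an assumption
)

func main() {
	// Decimal (SI) units, multiples of 1000.
	fmt.Println(units.HumanSize(2 * 1000 * 1000)) // "2 MB"

	// Binary units, multiples of 1024.
	fmt.Println(units.BytesSize(2 * 1024 * 1024)) // "2 MiB"

	// Parsing accepts an optional case-insensitive unit prefix and 'b' suffix.
	if n, err := units.RAMInBytes("32m"); err == nil {
		fmt.Println(n) // 33554432 (32 MiB)
	}
	if n, err := units.FromHumanSize("17MB"); err == nil {
		fmt.Println(n) // 17000000
	}
}
```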

31
vendor/github.com/golang/protobuf/LICENSE generated vendored Normal file

@ -0,0 +1,31 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

43
vendor/github.com/golang/protobuf/proto/Makefile generated vendored Normal file

@ -0,0 +1,43 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make

223
vendor/github.com/golang/protobuf/proto/clone.go generated vendored Normal file

@ -0,0 +1,223 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}
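To make the Clone/Merge semantics concrete, here is a hedged sketch using a hand-written struct shaped like generated proto2 code; the Example type, its field tags, and the values are assumptions for illustration, not part of the vendored sources.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example mimics the shape of a generated proto2 message; the type and its
// struct tags are assumptions made purely for illustration.
type Example struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count            *int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	src := &Example{Name: proto.String("a"), Count: proto.Int32(1)}

	// Clone produces a deep copy; mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*Example)
	*dup.Count = 2
	fmt.Println(*src.Count, *dup.Count) // 1 2

	// Merge overwrites set scalar fields in dst with the values from src
	// and fills in fields that dst has not set.
	dst := &Example{Name: proto.String("b")}
	proto.Merge(dst, src)
	fmt.Println(*dst.Name, *dst.Count) // a 1
}
```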

867
vendor/github.com/golang/protobuf/proto/decode.go generated vendored Normal file

@ -0,0 +1,867 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}
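A tiny worked example of the varint format these decoders consume; the byte values are standard encodings (not taken from the diff) and the comments trace the arithmetic.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 0xAC = 1010_1100: the low 7 bits are 0x2C (44) and the high bit says
	// "continue". 0x02 contributes 2 << 7 = 256, so the decoded value is 300.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2

	// The Buffer variant advances an internal index and reports truncation
	// as io.ErrUnexpectedEOF and oversized values as an overflow error.
	b := proto.NewBuffer([]byte{0x96, 0x01})
	v, err := b.DecodeVarint()
	fmt.Println(v, err) // 150 <nil>
}
```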

1325
vendor/github.com/golang/protobuf/proto/encode.go generated vendored Normal file

File diff suppressed because it is too large

276
vendor/github.com/golang/protobuf/proto/equal.go generated vendored Normal file

@ -0,0 +1,276 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}
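A brief sketch of the Equal rules on a hand-written message type; as with the Clone example, the Pair type and its tags are illustrative assumptions rather than generated code from this repository.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Pair mimics the shape of generated proto2 code; it is an assumption made
// purely for illustration.
type Pair struct {
	Key              *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Pair) Reset()         { *m = Pair{} }
func (m *Pair) String() string { return proto.CompactTextString(m) }
func (*Pair) ProtoMessage()    {}

func main() {
	a := &Pair{Key: proto.String("x")}
	b := &Pair{Key: proto.String("x")}
	c := &Pair{}

	fmt.Println(proto.Equal(a, b)) // true: set fields compare by value
	fmt.Println(proto.Equal(a, c)) // false: a set field vs. an unset field
}
```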

399
vendor/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file

@ -0,0 +1,399 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
// Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto)
if !ok {
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
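A compact sketch of the extension API in action; the Base message, its extension range, and the E_Label descriptor mimic what generated code would provide and are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

// Base mimics a generated extendable proto2 message; the type, its extension
// range, and E_Label below are illustrative assumptions.
type Base struct {
	XXX_extensions   map[int32]proto.Extension `json:"-"`
	XXX_unrecognized []byte                    `json:"-"`
}

func (m *Base) Reset()         { *m = Base{} }
func (m *Base) String() string { return proto.CompactTextString(m) }
func (*Base) ProtoMessage()    {}

func (*Base) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 100, End: 200}}
}

func (m *Base) ExtensionMap() map[int32]proto.Extension {
	if m.XXX_extensions == nil {
		m.XXX_extensions = make(map[int32]proto.Extension)
	}
	return m.XXX_extensions
}

var E_Label = &proto.ExtensionDesc{
	ExtendedType:  (*Base)(nil),
	ExtensionType: (*string)(nil),
	Field:         101,
	Name:          "example.label",
	Tag:           "bytes,101,opt,name=label",
}

func main() {
	m := &Base{}

	// SetExtension type-checks the value against the descriptor and stores
	// the decoded form; GetExtension hands it back without re-decoding.
	if err := proto.SetExtension(m, E_Label, proto.String("hello")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.HasExtension(m, E_Label)) // true

	v, err := proto.GetExtension(m, E_Label)
	fmt.Println(*(v.(*string)), err) // hello <nil>
}
```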

894
vendor/github.com/golang/protobuf/proto/lib.go generated vendored Normal file

@ -0,0 +1,894 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
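A self-contained sketch of reusing one Buffer for low-level work; the EncodeVarint and DecodeVarint methods live in this package's encode.go and decode.go, which are not shown in this excerpt:
```
// assumes: import ( "fmt" ; proto "github.com/golang/protobuf/proto" )
buf := proto.NewBuffer(nil)
_ = buf.EncodeVarint(300) // append two varints
_ = buf.EncodeVarint(1)
for {
	v, err := buf.DecodeVarint() // read them back, starting at index 0
	if err != nil {
		break
	}
	fmt.Println(v) // 300, then 1
}
buf.Reset() // ready for reuse
```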
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
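Each helper just returns a pointer to a copy of its argument, which is the shape generated optional scalar fields expect; a trivial sketch:
```
// assumes: import ( "fmt" ; proto "github.com/golang/protobuf/proto" )
s := proto.String("hello")
n := proto.Int32(42)
fmt.Println(*s, *n) // hello 42
```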
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
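A quick sketch of the fallback behavior, using a literal name map:
```
names := map[int32]string{17: "X"}
fmt.Println(proto.EnumName(names, 17)) // X
fmt.Println(proto.EnumName(names, 42)) // 42 (unknown values print as their number)
```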
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
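Both accepted JSON forms, sketched with a literal value map; the enum name argument is only used to build error messages:
```
vals := map[string]int32{"X": 17}
v, _ := proto.UnmarshalJSONEnum(vals, []byte(`"X"`), "FOO") // symbolic form
w, _ := proto.UnmarshalJSONEnum(vals, []byte(`17`), "FOO")  // numeric form
fmt.Println(v, w) // 17 17
```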
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
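A small sketch that hand-encodes a single varint field and dumps it; WireVarint is declared in this package's properties.go and the Buffer encode methods in encode.go:
```
b := proto.NewBuffer(nil)
_ = b.EncodeVarint(uint64(1<<3 | proto.WireVarint)) // tag: field 1, varint wire type
_ = b.EncodeVarint(150)                             // field value
new(proto.Buffer).DebugPrint("hand-rolled message", b.Bytes())
```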
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
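Reusing the pb.Test example from the package comment above (a hypothetical generated package), SetDefaults fills the unset Type field with its declared default:
```
t := &pb.Test{Label: proto.String("some label")}
proto.SetDefaults(t) // t.Type is now a non-nil pointer to the default
fmt.Println(*t.Type) // 77
```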
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

280
vendor/github.com/golang/protobuf/proto/message_set.go generated vendored Normal file

@@ -0,0 +1,280 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

479
vendor/github.com/golang/protobuf/proto/pointer_reflect.go generated vendored Normal file

@@ -0,0 +1,479 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(int64(x)))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}

266
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go generated vendored Normal file

@@ -0,0 +1,266 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

846
vendor/github.com/golang/protobuf/proto/properties.go generated vendored Normal file

@@ -0,0 +1,846 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s = ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
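As an aside, the tag-parsing half of this file can be exercised on its own. A minimal sketch, assuming the vendored package is imported from its usual path `github.com/golang/protobuf/proto` (the expected output comment reflects the corrected `String` above):
```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Parse a struct-field tag in the form the generated code emits:
	// wire type, tag number, cardinality, then name/json/def options.
	var p proto.Properties
	p.Parse("bytes,49,opt,name=foo,json=foo,def=hello!")

	fmt.Println(p.Tag, p.Wire, p.OrigName) // 49 bytes foo

	// String re-encodes the parsed properties in the same tag style.
	fmt.Println(p.String()) // bytes,49,opt,name=foo,def=hello!
}
```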

762
vendor/github.com/golang/protobuf/proto/text.go generated vendored Normal file

@ -0,0 +1,762 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func writeStruct(w *textWriter, sv reflect.Value) error {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: m.Compact,
}
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (m *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
m.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
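To illustrate the marshaler entry points above, here is a small hand-written message; this is only a sketch (real code uses protoc-generated types), but the `protobuf` struct tags are all that GetProperties and writeStruct need:
```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a protoc-generated message, for illustration only.
type Example struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	msg := &Example{Name: proto.String("demo"), Count: proto.Int32(3)}

	// Multi-line text form:
	//   name: "demo"
	//   count: 3
	fmt.Print(proto.MarshalTextString(msg))

	// Single-line form from the compact marshaler.
	fmt.Println(proto.CompactTextString(msg))
}
```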

806
vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored Normal file

@ -0,0 +1,806 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
tok = p.next()
if tok.err != nil {
return tok.err
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}
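UnmarshalText is the inverse of the marshaler in text.go. A minimal round-trip sketch, again using a hand-written stand-in type (an assumption, not part of this diff) whose struct tag lets the parser match the `name` field:
```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Greeting stands in for a generated message type.
type Greeting struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	var g Greeting
	// Parse the text format produced by MarshalTextString.
	if err := proto.UnmarshalText(`name: "world"`, &g); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*g.Name) // world
}
```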


@ -1,3 +0,0 @@
// Package apiversions provides information and interaction with the different
// API versions for the OpenStack Block Storage service, code-named Cinder.
package apiversions


@ -1,21 +0,0 @@
package apiversions
import (
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// List lists all the Cinder API versions available to end-users.
func List(c *gophercloud.ServiceClient) pagination.Pager {
return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page {
return APIVersionPage{pagination.SinglePageBase(r)}
})
}
// Get will retrieve the volume type with the provided ID. To extract the volume
// type from the result, call the Extract method on the GetResult.
func Get(client *gophercloud.ServiceClient, v string) GetResult {
var res GetResult
_, res.Err = client.Get(getURL(client, v), &res.Body, nil)
return res
}
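For context, the removed helpers were typically consumed like this; the import path is assumed from the usual gophercloud layout (the file names are not shown in these hunks), and Extract comes from the results.go hunk below:
```
package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions"
)

// showVersion issues the Get request and pulls the APIVersion out of
// the GetResult.
func showVersion(client *gophercloud.ServiceClient) {
	version, err := apiversions.Get(client, "v2").Extract()
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println(version.ID, version.Status)
}
```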


@ -1,58 +0,0 @@
package apiversions
import (
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
"github.com/mitchellh/mapstructure"
)
// APIVersion represents an API version for Cinder.
type APIVersion struct {
ID string `json:"id" mapstructure:"id"` // unique identifier
Status string `json:"status" mapstructure:"status"` // current status
Updated string `json:"updated" mapstructure:"updated"` // date last updated
}
// APIVersionPage is the page returned by a pager when traversing over a
// collection of API versions.
type APIVersionPage struct {
pagination.SinglePageBase
}
// IsEmpty checks whether an APIVersionPage struct is empty.
func (r APIVersionPage) IsEmpty() (bool, error) {
is, err := ExtractAPIVersions(r)
if err != nil {
return true, err
}
return len(is) == 0, nil
}
// ExtractAPIVersions takes a collection page, extracts all of the elements,
// and returns them as a slice of APIVersion structs. It is effectively a cast.
func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) {
var resp struct {
Versions []APIVersion `mapstructure:"versions"`
}
err := mapstructure.Decode(page.(APIVersionPage).Body, &resp)
return resp.Versions, err
}
// GetResult represents the result of a get operation.
type GetResult struct {
gophercloud.Result
}
// Extract is a function that accepts a result and extracts an API version resource.
func (r GetResult) Extract() (*APIVersion, error) {
var resp struct {
Version *APIVersion `mapstructure:"version"`
}
err := mapstructure.Decode(r.Body, &resp)
return resp.Version, err
}

View File

@ -1,15 +0,0 @@
package apiversions
import (
"strings"
"github.com/rackspace/gophercloud"
)
func getURL(c *gophercloud.ServiceClient, version string) string {
return c.ServiceURL(strings.TrimRight(version, "/") + "/")
}
func listURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL("")
}

View File

@ -1,5 +0,0 @@
// Package snapshots provides information and interaction with snapshots in the
// OpenStack Block Storage service. A snapshot is a point in time copy of the
// data contained in an external storage volume, and can be controlled
// programmatically.
package snapshots

View File

@ -1,114 +0,0 @@
package snapshots
import (
"fmt"
"net/http"
"testing"
th "github.com/rackspace/gophercloud/testhelper"
fake "github.com/rackspace/gophercloud/testhelper/client"
)
func MockListResponse(t *testing.T) {
th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"snapshots": [
{
"id": "289da7f8-6440-407c-9fb4-7db01ec49164",
"display_name": "snapshot-001"
},
{
"id": "96c3bda7-c82a-4f50-be73-ca7621794835",
"display_name": "snapshot-002"
}
]
}
`)
})
}
func MockGetResponse(t *testing.T) {
th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"snapshot": {
"display_name": "snapshot-001",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
}
}
`)
})
}
func MockCreateResponse(t *testing.T) {
th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "POST")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Content-Type", "application/json")
th.TestHeader(t, r, "Accept", "application/json")
th.TestJSONRequest(t, r, `
{
"snapshot": {
"volume_id": "1234",
"display_name": "snapshot-001"
}
}
`)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
fmt.Fprintf(w, `
{
"snapshot": {
"volume_id": "1234",
"display_name": "snapshot-001",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
}
}
`)
})
}
func MockUpdateMetadataResponse(t *testing.T) {
th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "PUT")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Content-Type", "application/json")
th.TestJSONRequest(t, r, `
{
"metadata": {
"key": "v1"
}
}
`)
fmt.Fprintf(w, `
{
"metadata": {
"key": "v1"
}
}
`)
})
}
func MockDeleteResponse(t *testing.T) {
th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "DELETE")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.WriteHeader(http.StatusNoContent)
})
}
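
A sketch of how these fixtures are typically exercised from a test in the same package (the test name and assertion are illustrative; List, ListOpts and ExtractSnapshots are the helpers defined elsewhere in this package):

```
package snapshots

import (
    "testing"

    "github.com/rackspace/gophercloud/pagination"
    th "github.com/rackspace/gophercloud/testhelper"
    "github.com/rackspace/gophercloud/testhelper/client"
)

func TestListSnapshotsFixture(t *testing.T) {
    th.SetupHTTP() // start the test server that backs th.Mux
    defer th.TeardownHTTP()
    MockListResponse(t) // install the canned /snapshots response above

    err := List(client.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
        got, err := ExtractSnapshots(page)
        if err != nil {
            return false, err
        }
        if len(got) != 2 {
            t.Errorf("expected 2 snapshots, got %d", len(got))
        }
        return true, nil
    })
    th.AssertNoErr(t, err)
}
```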

View File

@ -1,173 +0,0 @@
package snapshots
import (
"fmt"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request.
type CreateOptsBuilder interface {
ToSnapshotCreateMap() (map[string]interface{}, error)
}
// CreateOpts contains options for creating a Snapshot. This object is passed to
// the snapshots.Create function. For more information about these parameters,
// see the Snapshot object.
type CreateOpts struct {
// OPTIONAL
Description string
// OPTIONAL
Force bool
// OPTIONAL
Metadata map[string]interface{}
// OPTIONAL
Name string
// REQUIRED
VolumeID string
}
// ToSnapshotCreateMap assembles a request body based on the contents of a
// CreateOpts.
func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) {
s := make(map[string]interface{})
if opts.VolumeID == "" {
return nil, fmt.Errorf("Required CreateOpts field 'VolumeID' not set.")
}
s["volume_id"] = opts.VolumeID
if opts.Description != "" {
s["display_description"] = opts.Description
}
if opts.Force {
s["force"] = opts.Force
}
if opts.Metadata != nil {
s["metadata"] = opts.Metadata
}
if opts.Name != "" {
s["display_name"] = opts.Name
}
return map[string]interface{}{"snapshot": s}, nil
}
// Create will create a new Snapshot based on the values in CreateOpts. To
// extract the Snapshot object from the response, call the Extract method on the
// CreateResult.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
var res CreateResult
reqBody, err := opts.ToSnapshotCreateMap()
if err != nil {
res.Err = err
return res
}
_, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
OkCodes: []int{200, 201},
})
return res
}
// Delete will delete the existing Snapshot with the provided ID.
func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
var res DeleteResult
_, res.Err = client.Delete(deleteURL(client, id), nil)
return res
}
// Get retrieves the Snapshot with the provided ID. To extract the Snapshot
// object from the response, call the Extract method on the GetResult.
func Get(client *gophercloud.ServiceClient, id string) GetResult {
var res GetResult
_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
return res
}
// ListOptsBuilder allows extensions to add additional parameters to the List
// request.
type ListOptsBuilder interface {
ToSnapshotListQuery() (string, error)
}
// ListOpts hold options for listing Snapshots. It is passed to the
// snapshots.List function.
type ListOpts struct {
Name string `q:"display_name"`
Status string `q:"status"`
VolumeID string `q:"volume_id"`
}
// ToSnapshotListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToSnapshotListQuery() (string, error) {
q, err := gophercloud.BuildQueryString(opts)
if err != nil {
return "", err
}
return q.String(), nil
}
// List returns Snapshots optionally limited by the conditions provided in
// ListOpts.
func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
url := listURL(client)
if opts != nil {
query, err := opts.ToSnapshotListQuery()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
}
createPage := func(r pagination.PageResult) pagination.Page {
return ListResult{pagination.SinglePageBase(r)}
}
return pagination.NewPager(client, url, createPage)
}
// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
// the Update request.
type UpdateMetadataOptsBuilder interface {
ToSnapshotUpdateMetadataMap() (map[string]interface{}, error)
}
// UpdateMetadataOpts contain options for updating an existing Snapshot. This
// object is passed to the snapshots.Update function. For more information
// about the parameters, see the Snapshot object.
type UpdateMetadataOpts struct {
Metadata map[string]interface{}
}
// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of
// an UpdateMetadataOpts.
func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) {
v := make(map[string]interface{})
if opts.Metadata != nil {
v["metadata"] = opts.Metadata
}
return v, nil
}
// UpdateMetadata will update the Snapshot with provided information. To
// extract the updated Snapshot from the response, call the ExtractMetadata
// method on the UpdateMetadataResult.
func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) UpdateMetadataResult {
var res UpdateMetadataResult
reqBody, err := opts.ToSnapshotUpdateMetadataMap()
if err != nil {
res.Err = err
return res
}
_, res.Err = client.Put(updateMetadataURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{
OkCodes: []int{200},
})
return res
}
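
A non-authoritative sketch of driving these request helpers; `client` is assumed to be an authenticated Block Storage v1 ServiceClient, and the name and metadata values are illustrative.

```
package example

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots" // assumed path
)

// snapshotVolume creates a snapshot of volumeID and returns the new snapshot's ID.
func snapshotVolume(client *gophercloud.ServiceClient, volumeID string) (string, error) {
    snap, err := snapshots.Create(client, snapshots.CreateOpts{
        VolumeID: volumeID,         // the only required field
        Name:     "nightly-backup", // illustrative display name
        Metadata: map[string]interface{}{"purpose": "backup"},
    }).Extract()
    if err != nil {
        return "", err
    }
    fmt.Println("created snapshot", snap.ID)
    return snap.ID, nil
}
```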

View File

@ -1,123 +0,0 @@
package snapshots
import (
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
"github.com/mitchellh/mapstructure"
)
// Snapshot contains all the information associated with an OpenStack Snapshot.
type Snapshot struct {
// Current status of the Snapshot.
Status string `mapstructure:"status"`
// Display name.
Name string `mapstructure:"display_name"`
// Instances onto which the Snapshot is attached.
Attachments []string `mapstructure:"attachments"`
// Logical group.
AvailabilityZone string `mapstructure:"availability_zone"`
// Is the Snapshot bootable?
Bootable string `mapstructure:"bootable"`
// Date created.
CreatedAt string `mapstructure:"created_at"`
// Display description.
Description string `mapstructure:"display_discription"`
// See VolumeType object for more information.
VolumeType string `mapstructure:"volume_type"`
// ID of the Snapshot from which this Snapshot was created.
SnapshotID string `mapstructure:"snapshot_id"`
// ID of the Volume from which this Snapshot was created.
VolumeID string `mapstructure:"volume_id"`
// User-defined key-value pairs.
Metadata map[string]string `mapstructure:"metadata"`
// Unique identifier.
ID string `mapstructure:"id"`
// Size of the Snapshot, in GB.
Size int `mapstructure:"size"`
}
// CreateResult contains the response body and error from a Create request.
type CreateResult struct {
commonResult
}
// GetResult contains the response body and error from a Get request.
type GetResult struct {
commonResult
}
// DeleteResult contains the response body and error from a Delete request.
type DeleteResult struct {
gophercloud.ErrResult
}
// ListResult is a pagination.Pager that is returned from a call to the List function.
type ListResult struct {
pagination.SinglePageBase
}
// IsEmpty returns true if a ListResult contains no Snapshots.
func (r ListResult) IsEmpty() (bool, error) {
snaps, err := ExtractSnapshots(r)
if err != nil {
return true, err
}
return len(snaps) == 0, nil
}
// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call.
func ExtractSnapshots(page pagination.Page) ([]Snapshot, error) {
var response struct {
Snapshots []Snapshot `json:"snapshots"`
}
err := mapstructure.Decode(page.(ListResult).Body, &response)
return response.Snapshots, err
}
// UpdateMetadataResult contains the response body and error from an UpdateMetadata request.
type UpdateMetadataResult struct {
commonResult
}
// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata.
func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) {
if r.Err != nil {
return nil, r.Err
}
m := r.Body.(map[string]interface{})["metadata"]
return m.(map[string]interface{}), nil
}
type commonResult struct {
gophercloud.Result
}
// Extract will get the Snapshot object out of the commonResult object.
func (r commonResult) Extract() (*Snapshot, error) {
if r.Err != nil {
return nil, r.Err
}
var res struct {
Snapshot *Snapshot `json:"snapshot"`
}
err := mapstructure.Decode(r.Body, &res)
return res.Snapshot, err
}

View File

@ -1,27 +0,0 @@
package snapshots
import "github.com/rackspace/gophercloud"
func createURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL("snapshots")
}
func deleteURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("snapshots", id)
}
func getURL(c *gophercloud.ServiceClient, id string) string {
return deleteURL(c, id)
}
func listURL(c *gophercloud.ServiceClient) string {
return createURL(c)
}
func metadataURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("snapshots", id, "metadata")
}
func updateMetadataURL(c *gophercloud.ServiceClient, id string) string {
return metadataURL(c, id)
}

View File

@ -1,22 +0,0 @@
package snapshots
import (
"github.com/rackspace/gophercloud"
)
// WaitForStatus will continually poll the resource, checking for a particular
// status. It will do this for the number of seconds defined.
func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
return gophercloud.WaitFor(secs, func() (bool, error) {
current, err := Get(c, id).Extract()
if err != nil {
return false, err
}
if current.Status == status {
return true, nil
}
return false, nil
})
}
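
Combined with Create above, WaitForStatus gives a simple blocking workflow. A sketch under the assumption that "available" is the terminal status of interest and 300 seconds is an acceptable timeout:

```
package example

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots" // assumed path
)

// createAndWait snapshots a volume and blocks until the snapshot reports
// "available", polling for at most 300 seconds.
func createAndWait(client *gophercloud.ServiceClient, volumeID string) (*snapshots.Snapshot, error) {
    snap, err := snapshots.Create(client, snapshots.CreateOpts{VolumeID: volumeID}).Extract()
    if err != nil {
        return nil, err
    }
    if err := snapshots.WaitForStatus(client, snap.ID, "available", 300); err != nil {
        return nil, err
    }
    return snapshots.Get(client, snap.ID).Extract()
}
```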

View File

@ -1,5 +0,0 @@
// Package volumes provides information and interaction with volumes in the
// OpenStack Block Storage service. A volume is a detachable block storage
// device, akin to a USB hard drive. It can only be attached to one instance at
// a time.
package volumes

View File

@ -1,113 +0,0 @@
package volumes
import (
"fmt"
"net/http"
"testing"
th "github.com/rackspace/gophercloud/testhelper"
fake "github.com/rackspace/gophercloud/testhelper/client"
)
func MockListResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"volumes": [
{
"id": "289da7f8-6440-407c-9fb4-7db01ec49164",
"display_name": "vol-001"
},
{
"id": "96c3bda7-c82a-4f50-be73-ca7621794835",
"display_name": "vol-002"
}
]
}
`)
})
}
func MockGetResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"volume": {
"display_name": "vol-001",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
"attachments": [
{
"device": "/dev/vde",
"server_id": "a740d24b-dc5b-4d59-ac75-53971c2920ba",
"id": "d6da11e5-2ed3-413e-88d8-b772ba62193d",
"volume_id": "d6da11e5-2ed3-413e-88d8-b772ba62193d"
}
]
}
}
`)
})
}
func MockCreateResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "POST")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Content-Type", "application/json")
th.TestHeader(t, r, "Accept", "application/json")
th.TestJSONRequest(t, r, `
{
"volume": {
"size": 75
}
}
`)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
fmt.Fprintf(w, `
{
"volume": {
"size": 4,
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
}
}
`)
})
}
func MockDeleteResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "DELETE")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.WriteHeader(http.StatusNoContent)
})
}
func MockUpdateResponse(t *testing.T) {
th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "PUT")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"volume": {
"display_name": "vol-002",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
}
}
`)
})
}

View File

@ -1,203 +0,0 @@
package volumes
import (
"fmt"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request.
type CreateOptsBuilder interface {
ToVolumeCreateMap() (map[string]interface{}, error)
}
// CreateOpts contains options for creating a Volume. This object is passed to
// the volumes.Create function. For more information about these parameters,
// see the Volume object.
type CreateOpts struct {
// OPTIONAL
Availability string
// OPTIONAL
Description string
// OPTIONAL
Metadata map[string]string
// OPTIONAL
Name string
// REQUIRED
Size int
// OPTIONAL
SnapshotID, SourceVolID, ImageID string
// OPTIONAL
VolumeType string
}
// ToVolumeCreateMap assembles a request body based on the contents of a
// CreateOpts.
func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {
v := make(map[string]interface{})
if opts.Size == 0 {
return nil, fmt.Errorf("Required CreateOpts field 'Size' not set.")
}
v["size"] = opts.Size
if opts.Availability != "" {
v["availability_zone"] = opts.Availability
}
if opts.Description != "" {
v["display_description"] = opts.Description
}
if opts.ImageID != "" {
v["imageRef"] = opts.ImageID
}
if opts.Metadata != nil {
v["metadata"] = opts.Metadata
}
if opts.Name != "" {
v["display_name"] = opts.Name
}
if opts.SourceVolID != "" {
v["source_volid"] = opts.SourceVolID
}
if opts.SnapshotID != "" {
v["snapshot_id"] = opts.SnapshotID
}
if opts.VolumeType != "" {
v["volume_type"] = opts.VolumeType
}
return map[string]interface{}{"volume": v}, nil
}
// Create will create a new Volume based on the values in CreateOpts. To extract
// the Volume object from the response, call the Extract method on the
// CreateResult.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
var res CreateResult
reqBody, err := opts.ToVolumeCreateMap()
if err != nil {
res.Err = err
return res
}
_, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
OkCodes: []int{200, 201},
})
return res
}
// Delete will delete the existing Volume with the provided ID.
func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
var res DeleteResult
_, res.Err = client.Delete(deleteURL(client, id), nil)
return res
}
// Get retrieves the Volume with the provided ID. To extract the Volume object
// from the response, call the Extract method on the GetResult.
func Get(client *gophercloud.ServiceClient, id string) GetResult {
var res GetResult
_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
return res
}
// ListOptsBuilder allows extensions to add additional parameters to the List
// request.
type ListOptsBuilder interface {
ToVolumeListQuery() (string, error)
}
// ListOpts holds options for listing Volumes. It is passed to the volumes.List
// function.
type ListOpts struct {
// admin-only option. Set it to true to see all tenant volumes.
AllTenants bool `q:"all_tenants"`
// List only volumes that contain Metadata.
Metadata map[string]string `q:"metadata"`
// List only volumes that have Name as the display name.
Name string `q:"name"`
// List only volumes that have a status of Status.
Status string `q:"status"`
}
// ToVolumeListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToVolumeListQuery() (string, error) {
q, err := gophercloud.BuildQueryString(opts)
if err != nil {
return "", err
}
return q.String(), nil
}
// List returns Volumes optionally limited by the conditions provided in ListOpts.
func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
url := listURL(client)
if opts != nil {
query, err := opts.ToVolumeListQuery()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
}
createPage := func(r pagination.PageResult) pagination.Page {
return ListResult{pagination.SinglePageBase(r)}
}
return pagination.NewPager(client, url, createPage)
}
// UpdateOptsBuilder allows extensions to add additional parameters to the
// Update request.
type UpdateOptsBuilder interface {
ToVolumeUpdateMap() (map[string]interface{}, error)
}
// UpdateOpts contain options for updating an existing Volume. This object is passed
// to the volumes.Update function. For more information about the parameters, see
// the Volume object.
type UpdateOpts struct {
// OPTIONAL
Name string
// OPTIONAL
Description string
// OPTIONAL
Metadata map[string]string
}
// ToVolumeUpdateMap assembles a request body based on the contents of an
// UpdateOpts.
func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
v := make(map[string]interface{})
if opts.Description != "" {
v["display_description"] = opts.Description
}
if opts.Metadata != nil {
v["metadata"] = opts.Metadata
}
if opts.Name != "" {
v["display_name"] = opts.Name
}
return map[string]interface{}{"volume": v}, nil
}
// Update will update the Volume with provided information. To extract the updated
// Volume from the response, call the Extract method on the UpdateResult.
func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
var res UpdateResult
reqBody, err := opts.ToVolumeUpdateMap()
if err != nil {
res.Err = err
return res
}
_, res.Err = client.Put(updateURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{
OkCodes: []int{200},
})
return res
}
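
A hedged sketch of creating and then listing volumes with these helpers; the size, display name, and import paths are assumptions.

```
package example

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" // assumed path
    "github.com/rackspace/gophercloud/pagination"
)

// createAndList creates a 10 GB volume, then prints every volume visible to the caller.
func createAndList(client *gophercloud.ServiceClient) error {
    vol, err := volumes.Create(client, volumes.CreateOpts{
        Size: 10,            // required, in GB
        Name: "data-vol-01", // optional display name
    }).Extract()
    if err != nil {
        return err
    }
    fmt.Println("created volume", vol.ID)

    return volumes.List(client, &volumes.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
        vs, err := volumes.ExtractVolumes(page)
        if err != nil {
            return false, err
        }
        for _, v := range vs {
            fmt.Printf("%s\t%s\t%d GB\n", v.ID, v.Name, v.Size)
        }
        return true, nil
    })
}
```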

View File

@ -1,113 +0,0 @@
package volumes
import (
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
"github.com/mitchellh/mapstructure"
)
// Volume contains all the information associated with an OpenStack Volume.
type Volume struct {
// Current status of the volume.
Status string `mapstructure:"status"`
// Human-readable display name for the volume.
Name string `mapstructure:"display_name"`
// Instances onto which the volume is attached.
Attachments []map[string]interface{} `mapstructure:"attachments"`
// This parameter is no longer used.
AvailabilityZone string `mapstructure:"availability_zone"`
// Indicates whether this is a bootable volume.
Bootable string `mapstructure:"bootable"`
// The date when this volume was created.
CreatedAt string `mapstructure:"created_at"`
// Human-readable description for the volume.
Description string `mapstructure:"display_description"`
// The type of volume to create, either SATA or SSD.
VolumeType string `mapstructure:"volume_type"`
// The ID of the snapshot from which the volume was created
SnapshotID string `mapstructure:"snapshot_id"`
// The ID of another block storage volume from which the current volume was created
SourceVolID string `mapstructure:"source_volid"`
// Arbitrary key-value pairs defined by the user.
Metadata map[string]string `mapstructure:"metadata"`
// Unique identifier for the volume.
ID string `mapstructure:"id"`
// Size of the volume in GB.
Size int `mapstructure:"size"`
}
// CreateResult contains the response body and error from a Create request.
type CreateResult struct {
commonResult
}
// GetResult contains the response body and error from a Get request.
type GetResult struct {
commonResult
}
// DeleteResult contains the response body and error from a Delete request.
type DeleteResult struct {
gophercloud.ErrResult
}
// ListResult is a pagination.Pager that is returned from a call to the List function.
type ListResult struct {
pagination.SinglePageBase
}
// IsEmpty returns true if a ListResult contains no Volumes.
func (r ListResult) IsEmpty() (bool, error) {
volumes, err := ExtractVolumes(r)
if err != nil {
return true, err
}
return len(volumes) == 0, nil
}
// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.
func ExtractVolumes(page pagination.Page) ([]Volume, error) {
var response struct {
Volumes []Volume `json:"volumes"`
}
err := mapstructure.Decode(page.(ListResult).Body, &response)
return response.Volumes, err
}
// UpdateResult contains the response body and error from an Update request.
type UpdateResult struct {
commonResult
}
type commonResult struct {
gophercloud.Result
}
// Extract will get the Volume object out of the commonResult object.
func (r commonResult) Extract() (*Volume, error) {
if r.Err != nil {
return nil, r.Err
}
var res struct {
Volume *Volume `json:"volume"`
}
err := mapstructure.Decode(r.Body, &res)
return res.Volume, err
}

View File

@ -1,23 +0,0 @@
package volumes
import "github.com/rackspace/gophercloud"
func createURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL("volumes")
}
func listURL(c *gophercloud.ServiceClient) string {
return createURL(c)
}
func deleteURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("volumes", id)
}
func getURL(c *gophercloud.ServiceClient, id string) string {
return deleteURL(c, id)
}
func updateURL(c *gophercloud.ServiceClient, id string) string {
return deleteURL(c, id)
}

View File

@ -1,22 +0,0 @@
package volumes
import (
"github.com/rackspace/gophercloud"
)
// WaitForStatus will continually poll the resource, checking for a particular
// status. It will do this for the number of seconds defined.
func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
return gophercloud.WaitFor(secs, func() (bool, error) {
current, err := Get(c, id).Extract()
if err != nil {
return false, err
}
if current.Status == status {
return true, nil
}
return false, nil
})
}

View File

@ -1,9 +0,0 @@
// Package volumetypes provides information and interaction with volume types
// in the OpenStack Block Storage service. A volume type indicates the type of
// a block storage volume, such as SATA, SCSI, SSD, etc. These can be
// customized or defined by the OpenStack admin.
//
// You can also define extra_specs associated with your volume types. For
// instance, you could have a VolumeType=SATA, with extra_specs (RPM=10000,
// RAID-Level=5). Extra_specs are defined and customized by the admin.
package volumetypes

View File

@ -1,60 +0,0 @@
package volumetypes
import (
"fmt"
"net/http"
"testing"
th "github.com/rackspace/gophercloud/testhelper"
fake "github.com/rackspace/gophercloud/testhelper/client"
)
func MockListResponse(t *testing.T) {
th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"volume_types": [
{
"id": "289da7f8-6440-407c-9fb4-7db01ec49164",
"name": "vol-type-001",
"extra_specs": {
"capabilities": "gpu"
}
},
{
"id": "96c3bda7-c82a-4f50-be73-ca7621794835",
"name": "vol-type-002",
"extra_specs": {}
}
]
}
`)
})
}
func MockGetResponse(t *testing.T) {
th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"volume_type": {
"name": "vol-type-001",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
"extra_specs": {
"serverNumber": "2"
}
}
}
`)
})
}

View File

@ -1,76 +0,0 @@
package volumetypes
import (
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request.
type CreateOptsBuilder interface {
ToVolumeTypeCreateMap() (map[string]interface{}, error)
}
// CreateOpts are options for creating a volume type.
type CreateOpts struct {
// OPTIONAL. See VolumeType.
ExtraSpecs map[string]interface{}
// OPTIONAL. See VolumeType.
Name string
}
// ToVolumeTypeCreateMap casts a CreateOpts struct to a map.
func (opts CreateOpts) ToVolumeTypeCreateMap() (map[string]interface{}, error) {
vt := make(map[string]interface{})
if opts.ExtraSpecs != nil {
vt["extra_specs"] = opts.ExtraSpecs
}
if opts.Name != "" {
vt["name"] = opts.Name
}
return map[string]interface{}{"volume_type": vt}, nil
}
// Create will create a new volume type. To extract the created volume type object,
// call the Extract method on the CreateResult.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
var res CreateResult
reqBody, err := opts.ToVolumeTypeCreateMap()
if err != nil {
res.Err = err
return res
}
_, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{
OkCodes: []int{200, 201},
})
return res
}
// Delete will delete the volume type with the provided ID.
func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
var res DeleteResult
_, res.Err = client.Delete(deleteURL(client, id), nil)
return res
}
// Get will retrieve the volume type with the provided ID. To extract the volume
// type from the result, call the Extract method on the GetResult.
func Get(client *gophercloud.ServiceClient, id string) GetResult {
var res GetResult
_, res.Err = client.Get(getURL(client, id), &res.Body, nil)
return res
}
// List returns all volume types.
func List(client *gophercloud.ServiceClient) pagination.Pager {
createPage := func(r pagination.PageResult) pagination.Page {
return ListResult{pagination.SinglePageBase(r)}
}
return pagination.NewPager(client, listURL(client), createPage)
}
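
A sketch of defining and enumerating volume types with these helpers; the type name and extra_specs are illustrative, and the import paths are assumed.

```
package example

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes" // assumed path
    "github.com/rackspace/gophercloud/pagination"
)

// defineAndListTypes creates an "ssd" volume type, then prints all known types.
func defineAndListTypes(client *gophercloud.ServiceClient) error {
    vt, err := volumetypes.Create(client, volumetypes.CreateOpts{
        Name:       "ssd",
        ExtraSpecs: map[string]interface{}{"capabilities": "ssd"},
    }).Extract()
    if err != nil {
        return err
    }
    fmt.Println("created volume type", vt.ID)

    return volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
        types, err := volumetypes.ExtractVolumeTypes(page)
        if err != nil {
            return false, err
        }
        for _, t := range types {
            fmt.Println(t.ID, t.Name)
        }
        return true, nil
    })
}
```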

View File

@ -1,72 +0,0 @@
package volumetypes
import (
"github.com/mitchellh/mapstructure"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// VolumeType contains all information associated with an OpenStack Volume Type.
type VolumeType struct {
ExtraSpecs map[string]interface{} `json:"extra_specs" mapstructure:"extra_specs"` // user-defined metadata
ID string `json:"id" mapstructure:"id"` // unique identifier
Name string `json:"name" mapstructure:"name"` // display name
}
// CreateResult contains the response body and error from a Create request.
type CreateResult struct {
commonResult
}
// GetResult contains the response body and error from a Get request.
type GetResult struct {
commonResult
}
// DeleteResult contains the response error from a Delete request.
type DeleteResult struct {
gophercloud.ErrResult
}
// ListResult is a pagination.Pager that is returned from a call to the List function.
type ListResult struct {
pagination.SinglePageBase
}
// IsEmpty returns true if a ListResult contains no Volume Types.
func (r ListResult) IsEmpty() (bool, error) {
volumeTypes, err := ExtractVolumeTypes(r)
if err != nil {
return true, err
}
return len(volumeTypes) == 0, nil
}
// ExtractVolumeTypes extracts and returns Volume Types.
func ExtractVolumeTypes(page pagination.Page) ([]VolumeType, error) {
var response struct {
VolumeTypes []VolumeType `mapstructure:"volume_types"`
}
err := mapstructure.Decode(page.(ListResult).Body, &response)
return response.VolumeTypes, err
}
type commonResult struct {
gophercloud.Result
}
// Extract will get the Volume Type object out of the commonResult object.
func (r commonResult) Extract() (*VolumeType, error) {
if r.Err != nil {
return nil, r.Err
}
var res struct {
VolumeType *VolumeType `json:"volume_type" mapstructure:"volume_type"`
}
err := mapstructure.Decode(r.Body, &res)
return res.VolumeType, err
}

View File

@ -1,19 +0,0 @@
package volumetypes
import "github.com/rackspace/gophercloud"
func listURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL("types")
}
func createURL(c *gophercloud.ServiceClient) string {
return listURL(c)
}
func getURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("types", id)
}
func deleteURL(c *gophercloud.ServiceClient, id string) string {
return getURL(c, id)
}

View File

@ -1,4 +0,0 @@
// Package base provides information and interaction with the base API
// resource in the OpenStack CDN service. This API resource allows for
// retrieving the Home Document and pinging the root URL.
package base

View File

@ -1,53 +0,0 @@
package base
import (
"fmt"
"net/http"
"testing"
th "github.com/rackspace/gophercloud/testhelper"
fake "github.com/rackspace/gophercloud/testhelper/client"
)
// HandleGetSuccessfully creates an HTTP handler at `/` on the test handler mux
// that responds with a `Get` response.
func HandleGetSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Accept", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"resources": {
"rel/cdn": {
"href-template": "services{?marker,limit}",
"href-vars": {
"marker": "param/marker",
"limit": "param/limit"
},
"hints": {
"allow": [
"GET"
],
"formats": {
"application/json": {}
}
}
}
}
}
`)
})
}
// HandlePingSuccessfully creates an HTTP handler at `/ping` on the test handler
// mux that responds with a `Ping` response.
func HandlePingSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.WriteHeader(http.StatusNoContent)
})
}

View File

@ -1,21 +0,0 @@
package base
import "github.com/rackspace/gophercloud"
// Get retrieves the home document, allowing the user to discover the
// entire API.
func Get(c *gophercloud.ServiceClient) GetResult {
var res GetResult
_, res.Err = c.Get(getURL(c), &res.Body, nil)
return res
}
// Ping retrieves a ping to the server.
func Ping(c *gophercloud.ServiceClient) PingResult {
var res PingResult
_, res.Err = c.Get(pingURL(c), nil, &gophercloud.RequestOpts{
OkCodes: []int{204},
MoreHeaders: map[string]string{"Accept": ""},
})
return res
}
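
A sketch of using Get and Ping together to discover the CDN API and check liveness; `client` is assumed to be a CDN v1 ServiceClient and the import path is assumed.

```
package example

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/cdn/v1/base" // assumed path
)

// discover prints the resource relations advertised in the home document,
// then pings the service root.
func discover(client *gophercloud.ServiceClient) error {
    home, err := base.Get(client).Extract()
    if err != nil {
        return err
    }
    for rel := range *home {
        fmt.Println("resource:", rel)
    }
    // Ping has no body; a nil error from ExtractErr means the root answered 204.
    return base.Ping(client).ExtractErr()
}
```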

View File

@ -1,35 +0,0 @@
package base
import (
"errors"
"github.com/rackspace/gophercloud"
)
// HomeDocument is a resource that contains all the resources for the CDN API.
type HomeDocument map[string]interface{}
// GetResult represents the result of a Get operation.
type GetResult struct {
gophercloud.Result
}
// Extract is a function that accepts a result and extracts a home document resource.
func (r GetResult) Extract() (*HomeDocument, error) {
if r.Err != nil {
return nil, r.Err
}
submap, ok := r.Body.(map[string]interface{})["resources"]
if !ok {
return nil, errors.New("Unexpected HomeDocument structure")
}
casted := HomeDocument(submap.(map[string]interface{}))
return &casted, nil
}
// PingResult represents the result of a Ping operation.
type PingResult struct {
gophercloud.ErrResult
}

Some files were not shown because too many files have changed in this diff.