Merge branch 'master' into naming

Yaron Schneider 2019-11-22 11:15:50 -08:00 committed by GitHub
commit 62856f9e0c
175 changed files with 33633 additions and 268 deletions

12
bindings/requests.go Normal file
View File

@ -0,0 +1,12 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package bindings
// WriteRequest is the object given to a Dapr output binding
type WriteRequest struct {
Data []byte `json:"data"`
Metadata map[string]string `json:"metadata"`
}
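A usage sketch may help here: only WriteRequest itself comes from the file above; the output-binding shape (a Write method taking *WriteRequest) is an assumption made for illustration and is not defined in this diff.
// Hypothetical sketch, written as if inside this bindings package (requires importing "log").
// outputBinding is assumed for illustration; the real interface may differ.
type outputBinding interface {
	Write(req *WriteRequest) error
}

// logBinding just records what it is asked to write.
type logBinding struct{}

func (l *logBinding) Write(req *WriteRequest) error {
	log.Printf("write: %d bytes, metadata=%v", len(req.Data), req.Metadata)
	return nil
}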

View File

@ -15,12 +15,6 @@ type ReadResponse struct {
Metadata map[string]string `json:"metadata"`
}
// WriteRequest is the object given to a Dapr output binding
type WriteRequest struct {
Data []byte `json:"data"`
Metadata map[string]string `json:"metadata"`
}
// AppResponse is the object describing the response from user code after a bindings event
type AppResponse struct {
Data interface{} `json:"data"`

11
go.mod
View File

@ -30,8 +30,10 @@ require (
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/go-redis/redis v6.15.5+incompatible
github.com/go-stack/stack v1.8.0 // indirect
github.com/gocql/gocql v0.0.0-20191018090344-07ace3bab0f8
github.com/golang/mock v1.3.1
github.com/google/pprof v0.0.0-20190908185732-236ed259b199 // indirect
github.com/google/uuid v1.1.1
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grandcat/zeroconf v0.0.0-20190424104450-85eadb44205c
@ -69,6 +71,15 @@ require (
golang.org/x/tools v0.0.0-20191028215554-80f3f9ca0853 // indirect
google.golang.org/api v0.10.0
google.golang.org/grpc v1.24.0
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/couchbase/gocb.v1 v1.6.4
gopkg.in/couchbase/gocbcore.v7 v7.1.15 // indirect
gopkg.in/couchbaselabs/gocbconnstr.v1 v1.0.4 // indirect
gopkg.in/couchbaselabs/jsonx.v1 v1.0.0 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d
k8s.io/client v0.0.0-00010101000000-000000000000 // indirect
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab

9
go.sum
View File

@ -440,6 +440,7 @@ github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUG
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw=
@ -749,6 +750,14 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/couchbase/gocb.v1 v1.6.4 h1:vAworfH5ZKDbonmayrwbGiD9jkAMroWmHXDf1GAIqMM=
gopkg.in/couchbase/gocb.v1 v1.6.4/go.mod h1:Ri5Qok4ZKiwmPr75YxZ0uELQy45XJgUSzeUnK806gTY=
gopkg.in/couchbase/gocbcore.v7 v7.1.15 h1:2nhfrqKz6TBex0Vcc+iq9UnAZltfCGklnM4mgdf2I3o=
gopkg.in/couchbase/gocbcore.v7 v7.1.15/go.mod h1:48d2Be0MxRtsyuvn+mWzqmoGUG9uA00ghopzOs148/E=
gopkg.in/couchbaselabs/gocbconnstr.v1 v1.0.4 h1:VVVoIV/nSw1w9ZnTEOjmkeJVcAzaCyxEujKglarxz7U=
gopkg.in/couchbaselabs/gocbconnstr.v1 v1.0.4/go.mod h1:ZjII0iKx4Veo6N6da+pEZu/ptNyKLg9QTVt7fFmR6sw=
gopkg.in/couchbaselabs/jsonx.v1 v1.0.0 h1:SJGarb8dXAsVZWizC26rxBkBYEKhSUxVh5wAnyzBVaI=
gopkg.in/couchbaselabs/jsonx.v1 v1.0.0/go.mod h1:oR201IRovxvLW/eISevH12/+MiKHtNQAKfcX8iWZvJY=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=

View File

@ -13,6 +13,7 @@ Currently supported state stores are:
* MongoDB
* Zookeeper
* Cloud Firestore (Datastore mode)
* Couchbase
## Implementing a new State Store
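A new store plugs in by implementing the same method set the Couchbase store added in this change provides. As a rough sketch (the exact interface definition in the `state` package is an assumption here):

```go
// Sketch of the expected method set, inferred from the Couchbase store in this change.
type Store interface {
	Init(metadata Metadata) error
	Set(req *SetRequest) error
	BulkSet(req []SetRequest) error
	Get(req *GetRequest) (*GetResponse, error)
	Delete(req *DeleteRequest) error
	BulkDelete(req []DeleteRequest) error
}
```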

View File

@ -115,7 +115,7 @@ func (c *StateStore) Get(req *state.GetRequest) (*state.GetResponse, error) {
if err != nil {
return nil, err
} else if len(items) == 0 {
return nil, nil
return &state.GetResponse{}, nil
}
b, err := jsoniter.ConfigFastest.Marshal(&items[0].Value)

View File

@ -0,0 +1,246 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package couchbase
import (
"errors"
"fmt"
"strconv"
"github.com/dapr/components-contrib/state"
"gopkg.in/couchbase/gocb.v1"
jsoniter "github.com/json-iterator/go"
)
const (
couchbaseURL = "couchbaseURL"
username = "username"
password = "password"
bucketName = "bucketName"
//see https://docs.couchbase.com/go-sdk/1.6/durability.html#configuring-durability
numReplicasDurableReplication = "numReplicasDurableReplication"
numReplicasDurablePersistence = "numReplicasDurablePersistence"
)
// Couchbase is a couchbase state store
type Couchbase struct {
bucket *gocb.Bucket
bucketName string //TODO: having bucket name sent as part of request (get,set etc.) metadata would be more flexible
numReplicasDurableReplication uint
numReplicasDurablePersistence uint
json jsoniter.API
}
// NewCouchbaseStateStore returns a new couchbase state store
func NewCouchbaseStateStore() *Couchbase {
return &Couchbase{
json: jsoniter.ConfigFastest,
}
}
func validateMetadata(metadata state.Metadata) error {
if metadata.Properties[couchbaseURL] == "" {
return errors.New("couchbase error: couchbase URL is missing")
}
if metadata.Properties[username] == "" {
return errors.New("couchbase error: couchbase username is missing")
}
if metadata.Properties[password] == "" {
return errors.New("couchbase error: couchbase password is missing")
}
if metadata.Properties[bucketName] == "" {
return errors.New("couchbase error: couchbase bucket name is missing")
}
v := metadata.Properties[numReplicasDurableReplication]
if v != "" {
_, err := strconv.ParseUint(v, 10, 0)
if err != nil {
return fmt.Errorf("couchbase error: %v", err)
}
}
v = metadata.Properties[numReplicasDurablePersistence]
if v != "" {
_, err := strconv.ParseUint(v, 10, 0)
if err != nil {
return fmt.Errorf("couchbase error: %v", err)
}
}
return nil
}
// Init does metadata and connection parsing
func (cbs *Couchbase) Init(metadata state.Metadata) error {
err := validateMetadata(metadata)
if err != nil {
return err
}
cbs.bucketName = metadata.Properties[bucketName]
c, err := gocb.Connect(metadata.Properties[couchbaseURL])
if err != nil {
return fmt.Errorf("couchbase error: unable to connect to couchbase at %s - %v ", metadata.Properties[couchbaseURL], err)
}
//does not actually trigger the authentication
c.Authenticate(gocb.PasswordAuthenticator{
Username: metadata.Properties[username],
Password: metadata.Properties[password],
})
//with RBAC, bucket-passwords are no longer used - https://docs.couchbase.com/go-sdk/1.6/sdk-authentication-overview.html#authenticating-with-legacy-sdk-versions
bucket, err := c.OpenBucket(cbs.bucketName, "")
if err != nil {
return fmt.Errorf("couchbase error: failed to open bucket %s - %v", cbs.bucketName, err)
}
cbs.bucket = bucket
r := metadata.Properties[numReplicasDurableReplication]
if r != "" {
_r, _ := strconv.ParseUint(r, 10, 0)
cbs.numReplicasDurableReplication = uint(_r)
}
p := metadata.Properties[numReplicasDurablePersistence]
if p != "" {
_p, _ := strconv.ParseUint(p, 10, 0)
cbs.numReplicasDurablePersistence = uint(_p)
}
return nil
}
// Set stores the value for a key in Couchbase. It honors ETag (for concurrency) and consistency settings
func (cbs *Couchbase) Set(req *state.SetRequest) error {
err := state.CheckSetRequestOptions(req)
if err != nil {
return err
}
var value string
b, ok := req.Value.([]byte)
if ok {
value = string(b)
} else {
value, err = cbs.json.MarshalToString(req.Value)
}
if err != nil {
return fmt.Errorf("couchbase error: failed to convert value %v", err)
}
//key already exists (use Replace)
if req.ETag != "" {
//compare-and-swap (CAS) for managing concurrent modifications - https://docs.couchbase.com/go-sdk/current/concurrent-mutations-cluster.html
cas, cerr := eTagToCas(req.ETag)
if cerr != nil {
return fmt.Errorf("couchbase error: failed to set value for key %s - %v", req.Key, cerr)
}
if req.Options.Consistency == state.Strong {
_, err = cbs.bucket.ReplaceDura(req.Key, value, cas, 0, cbs.numReplicasDurableReplication, cbs.numReplicasDurablePersistence)
} else {
_, err = cbs.bucket.Replace(req.Key, value, cas, 0)
}
} else {
//key does not exist: replace or insert (with Upsert)
if req.Options.Consistency == state.Strong {
_, err = cbs.bucket.UpsertDura(req.Key, value, 0, cbs.numReplicasDurableReplication, cbs.numReplicasDurablePersistence)
} else {
_, err = cbs.bucket.Upsert(req.Key, value, 0)
}
}
if err != nil {
return fmt.Errorf("couchbase error: failed to set value for key %s - %v", req.Key, err)
}
return nil
}
// BulkSet performs a bulk save operation
func (cbs *Couchbase) BulkSet(req []state.SetRequest) error {
for _, s := range req {
err := cbs.Set(&s)
if err != nil {
return err
}
}
return nil
}
// Get retrieves state from couchbase with a key
func (cbs *Couchbase) Get(req *state.GetRequest) (*state.GetResponse, error) {
var data interface{}
cas, err := cbs.bucket.Get(req.Key, &data)
if err != nil {
return nil, fmt.Errorf("couchbase error: failed to get value for key %s - %v", req.Key, err)
}
value, err := cbs.json.Marshal(&data)
if err != nil {
return nil, fmt.Errorf("couchbase error: failed to convert value to []byte - %v", err)
}
return &state.GetResponse{
Data: value,
ETag: fmt.Sprintf("%d", cas),
}, nil
}
// Delete performs a delete operation
func (cbs *Couchbase) Delete(req *state.DeleteRequest) error {
err := state.CheckDeleteRequestOptions(req)
if err != nil {
return err
}
var cas gocb.Cas = 0
if req.ETag != "" {
cas, err = eTagToCas(req.ETag)
if err != nil {
return fmt.Errorf("couchbase error: failed to delete key %s - %v", req.Key, err)
}
}
if req.Options.Consistency == state.Strong {
_, err = cbs.bucket.RemoveDura(req.Key, cas, cbs.numReplicasDurableReplication, cbs.numReplicasDurablePersistence)
} else {
_, err = cbs.bucket.Remove(req.Key, cas)
}
if err != nil {
return fmt.Errorf("couchbase error: failed to delete key %s - %v", req.Key, err)
}
return nil
}
// BulkDelete performs a bulk delete operation
func (cbs *Couchbase) BulkDelete(req []state.DeleteRequest) error {
for _, re := range req {
err := cbs.Delete(&re)
if err != nil {
return err
}
}
return nil
}
// eTagToCas converts the string ETag sent by the application into a gocb.Cas value, which can then be used for optimistic locking in set and delete operations
func eTagToCas(eTag string) (gocb.Cas, error) {
var cas gocb.Cas = 0
//CAS is a 64-bit integer - https://docs.couchbase.com/go-sdk/current/concurrent-mutations-cluster.html#cas-value-format
temp, err := strconv.ParseUint(eTag, 10, 64)
if err != nil {
return cas, err
}
cas = gocb.Cas(temp)
return cas, nil
}
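As a hedged usage sketch of the store above (the import path, connection details, and key/value below are placeholders; error handling is minimal):
package main

import (
	"fmt"

	"github.com/dapr/components-contrib/state"
	"github.com/dapr/components-contrib/state/couchbase" // assumed import path for the file above
)

func main() {
	store := couchbase.NewCouchbaseStateStore()

	// Keys mirror the metadata constants defined in the store; values are placeholders.
	err := store.Init(state.Metadata{Properties: map[string]string{
		"couchbaseURL": "couchbase://localhost",
		"username":     "Administrator",
		"password":     "password",
		"bucketName":   "dapr",
	}})
	if err != nil {
		panic(err)
	}

	if err := store.Set(&state.SetRequest{Key: "order-1", Value: map[string]string{"status": "paid"}}); err != nil {
		panic(err)
	}

	resp, err := store.Get(&state.GetRequest{Key: "order-1"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("data=%s etag=%s\n", resp.Data, resp.ETag)
}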

View File

@ -0,0 +1,121 @@
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package couchbase
import (
"testing"
"github.com/dapr/components-contrib/state"
"gopkg.in/couchbase/gocb.v1"
"github.com/stretchr/testify/assert"
)
func TestValidateMetadata(t *testing.T) {
t.Run("with mandatory fields", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
password: "secret",
bucketName: "testbucket",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.Equal(t, nil, err)
})
t.Run("with optional fields", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
password: "secret",
bucketName: "testbucket",
numReplicasDurablePersistence: "1",
numReplicasDurableReplication: "2",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.Equal(t, nil, err)
})
t.Run("With missing couchbase URL", func(t *testing.T) {
props := map[string]string{
username: "kehsihba",
password: "secret",
bucketName: "testbucket",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
t.Run("With missing username", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
password: "secret",
bucketName: "testbucket",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
t.Run("With missing password", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
bucketName: "testbucket",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
t.Run("With missing bucket", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
password: "secret",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
t.Run("With invalid durable replication", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
password: "secret",
numReplicasDurableReplication: "junk",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
t.Run("With invalid durable persistence", func(t *testing.T) {
props := map[string]string{
couchbaseURL: "foo://bar",
username: "kehsihba",
password: "secret",
numReplicasDurablePersistence: "junk",
}
metadata := state.Metadata{Properties: props}
err := validateMetadata(metadata)
assert.NotNil(t, err)
})
}
func TestETagToCas(t *testing.T) {
t.Run("with valid string", func(t *testing.T) {
casStr := "1572938024378368000"
ver := uint64(1572938024378368000)
var expectedCas gocb.Cas = gocb.Cas(ver)
cas, err := eTagToCas(casStr)
assert.Equal(t, nil, err)
assert.Equal(t, expectedCas, cas)
})
t.Run("with empty string", func(t *testing.T) {
_, err := eTagToCas("")
assert.NotNil(t, err)
})
}

View File

@ -10,6 +10,7 @@ import "time"
// GetRequest is the object describing a state fetch request
type GetRequest struct {
Key string `json:"key"`
Metadata map[string]string `json:"metadata"`
Options GetStateOption `json:"options,omitempty"`
}
@ -22,6 +23,7 @@ type GetStateOption struct {
type DeleteRequest struct {
Key string `json:"key"`
ETag string `json:"etag,omitempty"`
Metadata map[string]string `json:"metadata"`
Options DeleteStateOption `json:"options,omitempty"`
}
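The new Metadata field gives callers a per-request channel for store-specific hints. For example (the key used here is invented, in the spirit of the bucket-name TODO in the Couchbase store above):
// Hypothetical per-request metadata; keys are store-specific and this one is made up.
req := state.GetRequest{
	Key:      "order-1",
	Metadata: map[string]string{"bucketName": "orders"},
}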

198
vendor/github.com/miekg/dns/compress_generate.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
//+build ignore
// compress_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will look to see if there are (compressible) names, if so it will add that
// type to compressionLenHelperType and compressionLenSearchType which "fake" the
// compression so that Len() is fast.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
)
var packageHdr = `
// Code generated by "go run compress_generate.go"; DO NOT EDIT.
package dns
`
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
var domainTypes []string // Types that have a domain name in them (either compressible or not).
var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType)
Names:
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
st, _ := getTypeStruct(o.Type(), scope)
if st == nil {
continue
}
if name == "PrivateRR" {
continue
}
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
for i := 1; i < st.NumFields(); i++ {
if _, ok := st.Field(i).Type().(*types.Slice); ok {
if st.Tag(i) == `dns:"domain-name"` {
domainTypes = append(domainTypes, o.Name())
continue Names
}
if st.Tag(i) == `dns:"cdomain-name"` {
cdomainTypes = append(cdomainTypes, o.Name())
domainTypes = append(domainTypes, o.Name())
continue Names
}
continue
}
switch {
case st.Tag(i) == `dns:"domain-name"`:
domainTypes = append(domainTypes, o.Name())
continue Names
case st.Tag(i) == `dns:"cdomain-name"`:
cdomainTypes = append(cdomainTypes, o.Name())
domainTypes = append(domainTypes, o.Name())
continue Names
}
}
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
fmt.Fprint(b, "currentLen := initLen\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range domainTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "case *%s:\n", name)
for i := 1; i < st.NumFields(); i++ {
out := func(s string) {
fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"domain-name"`:
fallthrough
case `dns:"cdomain-name"`:
// For HIP we need to slice over the elements in this slice.
fmt.Fprintf(b, `for i := range x.%s {
currentLen -= len(x.%s[i]) + 1
}
`, st.Field(i).Name(), st.Field(i).Name())
fmt.Fprintf(b, `for i := range x.%s {
currentLen += compressionLenHelper(c, x.%s[i], currentLen)
}
`, st.Field(i).Name(), st.Field(i).Name())
}
continue
}
switch {
case st.Tag(i) == `dns:"cdomain-name"`:
fallthrough
case st.Tag(i) == `dns:"domain-name"`:
out(st.Field(i).Name())
}
}
}
fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
// compressionLenSearchType - search cdomain-tags types for compressible names.
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range cdomainTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "case *%s:\n", name)
j := 1
for i := 1; i < st.NumFields(); i++ {
out := func(s string, j int) {
fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
}
// There are no slice types with names that can be compressed.
switch {
case st.Tag(i) == `dns:"cdomain-name"`:
out(st.Field(i).Name(), j)
j++
}
}
k := "k1"
ok := "ok1"
sz := "sz1"
for i := 2; i < j; i++ {
k += fmt.Sprintf(" + k%d", i)
ok += fmt.Sprintf(" && ok%d", i)
sz += fmt.Sprintf(" + sz%d", i)
}
fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
}
fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
f, err := os.Create("zcompress.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}

158
vendor/github.com/miekg/dns/duplicate_generate.go generated vendored Normal file
View File

@ -0,0 +1,158 @@
//+build ignore
// duplicate_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate an isDuplicate function that compares the rdata fields based
// on the struct tags. The generated source is written to zduplicate.go, and is
// meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
)
var packageHdr = `
// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
package dns
`
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" || name == "RFC3597" {
continue
}
if name == "OPT" || name == "ANY" || name == "IXFR" || name == "AXFR" {
continue
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// Generate the giant switch that calls the correct function for each type.
fmt.Fprint(b, "// isDuplicateRdata calls the rdata specific functions\n")
fmt.Fprint(b, "func isDuplicateRdata(r1, r2 RR) bool {\n")
fmt.Fprint(b, "switch r1.Header().Rrtype {\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
_, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "case Type%s:\nreturn isDuplicate%s(r1.(*%s), r2.(*%s))\n", name, name, name, name)
}
fmt.Fprintf(b, "}\nreturn false\n}\n")
// Generate the duplicate check for each type.
fmt.Fprint(b, "// isDuplicate() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func isDuplicate%s(r1, r2 *%s) bool {\n", name, name)
for i := 1; i < st.NumFields(); i++ {
field := st.Field(i).Name()
o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) }
// For some reason, a and aaaa don't pop up as *types.Slice here (most likely because they are
// *indirectly* defined as a slice in the net package).
if _, ok := st.Field(i).Type().(*types.Slice); ok || st.Tag(i) == `dns:"a"` || st.Tag(i) == `dns:"aaaa"` {
o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
o3(`for i := 0; i < len(r1.%s); i++ {
if !isDulicateName(r1.%s[i], r2.%s[i]) {
return false
}
}`)
continue
}
o3(`for i := 0; i < len(r1.%s); i++ {
if r1.%s[i] != r2.%s[i] {
return false
}
}`)
continue
}
switch st.Tag(i) {
case `dns:"-"`:
// ignored
case `dns:"cdomain-name"`, `dns:"domain-name"`:
o2("if !isDulicateName(r1.%s, r2.%s) {\nreturn false\n}")
default:
o2("if r1.%s != r2.%s {\nreturn false\n}")
}
}
fmt.Fprintf(b, "return true\n}\n\n")
}
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("zduplicate.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}

348
vendor/github.com/miekg/dns/msg_generate.go generated vendored Normal file
View File

@ -0,0 +1,348 @@
//+build ignore
// msg_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate pack/unpack methods based on the struct tags. The generated source is
// written to zmsg.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
)
var packageHdr = `
// Code generated by "go run msg_generate.go"; DO NOT EDIT.
package dns
`
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
fmt.Fprint(b, "// pack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return off, err
}
`)
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("off, err = packStringTxt(rr.%s, msg, off)\n")
case `dns:"opt"`:
o("off, err = packDataOpt(rr.%s, msg, off)\n")
case `dns:"nsec"`:
o("off, err = packDataNsec(rr.%s, msg, off)\n")
case `dns:"domain-name"`:
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`: // ignored
case st.Tag(i) == `dns:"cdomain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
case st.Tag(i) == `dns:"domain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n")
case st.Tag(i) == `dns:"a"`:
o("off, err = packDataA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"aaaa"`:
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"uint48"`:
o("off, err = packUint48(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"txt"`:
o("off, err = packString(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
fallthrough
case st.Tag(i) == `dns:"base32"`:
o("off, err = packStringBase32(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("off, err = packStringBase64(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`):
// directly write instead of using o() so we get the error check in the correct place
field := st.Field(i).Name()
fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty
if rr.%s != "-" {
off, err = packStringHex(rr.%s, msg, off)
if err != nil {
return off, err
}
}
`, field, field)
continue
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("off, err = packStringHex(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"octet"`:
o("off, err = packStringOctet(rr.%s, msg, off)\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("off, err = packUint8(rr.%s, msg, off)\n")
case types.Uint16:
o("off, err = packUint16(rr.%s, msg, off)\n")
case types.Uint32:
o("off, err = packUint32(rr.%s, msg, off)\n")
case types.Uint64:
o("off, err = packUint64(rr.%s, msg, off)\n")
case types.String:
o("off, err = packString(rr.%s, msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
// We have packed everything, only now we know the rdlength of this RR
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
fmt.Fprintln(b, "return off, nil }\n")
}
fmt.Fprint(b, "// unpack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
fmt.Fprintf(b, "rr := new(%s)\n", name)
fmt.Fprint(b, "rr.Hdr = h\n")
fmt.Fprint(b, `if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
}
// size-* are special, because they reference a struct member we should use for the length.
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
structMember := structMember(st.Tag(i))
structTag := structTag(st.Tag(i))
switch structTag {
case "hex":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base32":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base64":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
continue
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
case `dns:"opt"`:
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
case `dns:"nsec"`:
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
case `dns:"domain-name"`:
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"cdomain-name"`:
fallthrough
case `dns:"domain-name"`:
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
case `dns:"a"`:
o("rr.%s, off, err = unpackDataA(msg, off)\n")
case `dns:"aaaa"`:
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
case `dns:"uint48"`:
o("rr.%s, off, err = unpackUint48(msg, off)\n")
case `dns:"txt"`:
o("rr.%s, off, err = unpackString(msg, off)\n")
case `dns:"base32"`:
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"base64"`:
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"hex"`:
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"octet"`:
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
case "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("rr.%s, off, err = unpackUint8(msg, off)\n")
case types.Uint16:
o("rr.%s, off, err = unpackUint16(msg, off)\n")
case types.Uint32:
o("rr.%s, off, err = unpackUint32(msg, off)\n")
case types.Uint64:
o("rr.%s, off, err = unpackUint64(msg, off)\n")
case types.String:
o("rr.%s, off, err = unpackString(msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
// If we've hit len(msg) we return without error.
if i < st.NumFields()-1 {
fmt.Fprintf(b, `if off == len(msg) {
return rr, off, nil
}
`)
}
}
fmt.Fprintf(b, "return rr, off, err }\n\n")
}
// Generate typeToUnpack map
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
for _, name := range namedTypes {
if name == "RFC3597" {
continue
}
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
}
fmt.Fprintln(b, "}\n")
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("zmsg.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
func structMember(s string) string {
fields := strings.Split(s, ":")
if len(fields) == 0 {
return ""
}
f := fields[len(fields)-1]
// f should have a closing "
if len(f) > 1 {
return f[:len(f)-1]
}
return f
}
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
func structTag(s string) string {
fields := strings.Split(s, ":")
if len(fields) < 2 {
return ""
}
return fields[1][len("\"size-"):]
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}

272
vendor/github.com/miekg/dns/types_generate.go generated vendored Normal file
View File

@ -0,0 +1,272 @@
//+build ignore
// types_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate conversion tables (TypeToRR and TypeToString) and banal
// methods (len, Header, copy) based on the struct tags. The generated source is
// written to ztypes.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
"text/template"
)
var skipLen = map[string]struct{}{
"NSEC": {},
"NSEC3": {},
"OPT": {},
"CSYNC": {},
}
var packageHdr = `
// Code generated by "go run types_generate.go"; DO NOT EDIT.
package dns
import (
"encoding/base64"
"net"
)
`
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
{{end}}{{end}} }
`))
var typeToString = template.Must(template.New("typeToString").Parse(`
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
}
`))
var headerFunc = template.Must(template.New("headerFunc").Parse(`
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
{{end}}
`))
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect constants like TypeX
var numberedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
b, ok := o.Type().(*types.Basic)
if !ok || b.Kind() != types.Uint16 {
continue
}
if !strings.HasPrefix(o.Name(), "Type") {
continue
}
name := strings.TrimPrefix(o.Name(), "Type")
if name == "PrivateRR" {
continue
}
numberedTypes = append(numberedTypes, name)
}
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// Generate TypeToRR
fatalIfErr(TypeToRR.Execute(b, namedTypes))
// Generate typeToString
fatalIfErr(typeToString.Execute(b, numberedTypes))
// Generate headerFunc
fatalIfErr(headerFunc.Execute(b, namedTypes))
// Generate len()
fmt.Fprint(b, "// len() functions\n")
for _, name := range namedTypes {
if _, ok := skipLen[name]; ok {
continue
}
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
fmt.Fprintf(b, "l := rr.Hdr.len()\n")
for i := 1; i < st.NumFields(); i++ {
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`:
// ignored
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`:
// ignored
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
o("l += len(rr.%s) + 1\n")
case st.Tag(i) == `dns:"octet"`:
o("l += len(rr.%s)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
o("l += len(rr.%s)/2\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("l += len(rr.%s)/2 + 1\n")
case st.Tag(i) == `dns:"a"`:
o("l += net.IPv4len // %s\n")
case st.Tag(i) == `dns:"aaaa"`:
o("l += net.IPv6len // %s\n")
case st.Tag(i) == `dns:"txt"`:
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
case st.Tag(i) == `dns:"uint48"`:
o("l += 6 // %s\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("l++ // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:
o("l += 4 // %s\n")
case types.Uint64:
o("l += 8 // %s\n")
case types.String:
o("l += len(rr.%s) + 1\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
fmt.Fprintf(b, "return l }\n")
}
// Generate copy()
fmt.Fprint(b, "// copy() functions\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
fields := []string{"rr.Hdr"}
for i := 1; i < st.NumFields(); i++ {
f := st.Field(i).Name()
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
t := sl.Underlying().String()
t = strings.TrimPrefix(t, "[]")
if strings.Contains(t, ".") {
splits := strings.Split(t, ".")
t = splits[len(splits)-1]
}
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
f, t, f, f, f)
fields = append(fields, f)
continue
}
if st.Field(i).Type().String() == "net.IP" {
fields = append(fields, "copyIP(rr."+f+")")
continue
}
fields = append(fields, "rr."+f)
}
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
fmt.Fprintf(b, "}\n")
}
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("ztypes.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}

View File

@ -0,0 +1 @@
coverage.txt

View File

@ -0,0 +1,20 @@
language: go
matrix:
include:
- go: "1.11.x"
- go: "1.12.x"
- go: "tip"
env:
- LINT=true
- COVERAGE=true
install:
- if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
- go get -u github.com/stretchr/testify/...
script:
- make test
- go build ./...
- if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
- if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi

View File

@ -0,0 +1,46 @@
Changes by Version
==================
1.1.0 (2019-03-23)
-------------------
Notable changes:
- The library is now released under Apache 2.0 license
- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
- 'golang.org/x/net/context' is replaced with 'context' from the standard library
List of all changes:
- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
- Update license to Apache 2.0 (#181) <Andrea Kao>
- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
- Fix race condition in MockSpan.Context() (#170) <Brad>
- Add PeerHostIPv4.SetString() (#155) <NeoCN>
- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
1.0.2 (2017-04-26)
-------------------
- Add more semantic tags (#139) <Rustam Zagirov>
1.0.1 (2017-02-06)
-------------------
- Correct spelling in comments <Ben Sigelman>
- Address race in nextMockID() (#123) <bill fumerola>
- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
1.0.0 (2016-09-26)
-------------------
- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)

201
vendor/github.com/opentracing/opentracing-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The OpenTracing Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

20
vendor/github.com/opentracing/opentracing-go/Makefile generated vendored Normal file
View File

@ -0,0 +1,20 @@
.DEFAULT_GOAL := test-and-lint
.PHONY: test-and-lint
test-and-lint: test lint
.PHONY: test
test:
go test -v -cover -race ./...
.PHONY: cover
cover:
go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
.PHONY: lint
lint:
go fmt ./...
golint ./...
@# Run again with magic to exit non-zero if golint outputs anything.
@! (golint ./... | read dummy)
go vet ./...

171
vendor/github.com/opentracing/opentracing-go/README.md generated vendored Normal file
View File

@ -0,0 +1,171 @@
[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
# OpenTracing API for Go
This package is a Go platform API for OpenTracing.
## Required Reading
In order to understand the Go platform API, one must first be familiar with the
[OpenTracing project](https://opentracing.io) and
[terminology](https://opentracing.io/specification/) more specifically.
## API overview for those adding instrumentation
Everyday consumers of this `opentracing` package really only need to worry
about a couple of key abstractions: the `StartSpan` function, the `Span`
interface, and binding a `Tracer` at `main()`-time. Here are code snippets
demonstrating some important use cases.
#### Singleton initialization
The simplest starting point is `./default_tracer.go`. As early as possible, call
```go
import "github.com/opentracing/opentracing-go"
import ".../some_tracing_impl"
func main() {
opentracing.SetGlobalTracer(
// tracing impl specific:
some_tracing_impl.New(...),
)
...
}
```
#### Non-Singleton initialization
If you prefer direct control to singletons, manage ownership of the
`opentracing.Tracer` implementation explicitly.
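For example (a minimal sketch), a component can hold its own `opentracing.Tracer` and use it directly instead of the global tracer:

```go
type Service struct {
	tracer opentracing.Tracer
}

func NewService(tracer opentracing.Tracer) *Service {
	return &Service{tracer: tracer}
}

func (s *Service) DoWork() {
	sp := s.tracer.StartSpan("do_work")
	defer sp.Finish()
	// ...
}
```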
#### Creating a Span given an existing Go `context.Context`
If you use `context.Context` in your application, OpenTracing's Go library will
happily rely on it for `Span` propagation. To start a new (blocking child)
`Span`, you can use `StartSpanFromContext`.
```go
func xyz(ctx context.Context, ...) {
...
span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
defer span.Finish()
span.LogFields(
log.String("event", "soft error"),
log.String("type", "cache timeout"),
log.Int("waited.millis", 1500))
...
}
```
#### Starting an empty trace by creating a "root span"
It's always possible to create a "root" `Span` with no parent or other causal
reference.
```go
func xyz() {
...
sp := opentracing.StartSpan("operation_name")
defer sp.Finish()
...
}
```
#### Creating a (child) Span given an existing (parent) Span
```go
func xyz(parentSpan opentracing.Span, ...) {
...
sp := opentracing.StartSpan(
"operation_name",
opentracing.ChildOf(parentSpan.Context()))
defer sp.Finish()
...
}
```
#### Serializing to the wire
```go
func makeSomeRequest(ctx context.Context) ... {
if span := opentracing.SpanFromContext(ctx); span != nil {
httpClient := &http.Client{}
httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
// Transmit the span's TraceContext as HTTP headers on our
// outbound request.
opentracing.GlobalTracer().Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(httpReq.Header))
resp, err := httpClient.Do(httpReq)
...
}
...
}
```
#### Deserializing from the wire
```go
http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
var serverSpan opentracing.Span
appSpecificOperationName := ...
wireContext, err := opentracing.GlobalTracer().Extract(
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
if err != nil {
// Optionally record something about err here
}
// Create the span referring to the RPC client if available.
// If wireContext == nil, a root span will be created.
serverSpan = opentracing.StartSpan(
appSpecificOperationName,
ext.RPCServerOption(wireContext))
defer serverSpan.Finish()
ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
...
}
```
#### Conditionally capture a field using `log.Noop`
In some situations, you may want to dynamically decide whether or not
to log a field. For example, you may want to capture additional data,
such as a customer ID, in non-production environments:
```go
func Customer(order *Order) log.Field {
if os.Getenv("ENVIRONMENT") == "dev" {
return log.String("customer", order.Customer.ID)
}
return log.Noop()
}
```
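The resulting field is then passed to `Span.LogFields()` alongside any other
fields, as the `log.Noop` documentation also illustrates:
```go
span.LogFields(log.String("event", "purchase"), Customer(order))
```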
#### Goroutine-safety
The entire public API is goroutine-safe and does not require external
synchronization.
## API pointers for those implementing a tracing system
Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
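To see the reference implementation in use, a sketch along these lines wires it up as the global tracer. This assumes `basictracer.New` and `basictracer.NewInMemoryRecorder` from the `basictracer-go` package; consult that repository for the current API.
```go
package main

import (
	basictracer "github.com/opentracing/basictracer-go"
	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// The in-memory recorder keeps finished spans in memory; a real deployment
	// would plug in a recorder that ships spans to a tracing backend.
	recorder := basictracer.NewInMemoryRecorder()
	opentracing.SetGlobalTracer(basictracer.New(recorder))

	span := opentracing.StartSpan("example-operation")
	span.Finish()
}
```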
## API compatibility
For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
## Tracer test suite
A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
## Licensing
[Apache 2.0 License](./LICENSE).


@ -0,0 +1,42 @@
package opentracing
type registeredTracer struct {
tracer Tracer
isRegistered bool
}
var (
globalTracer = registeredTracer{NoopTracer{}, false}
)
// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
// opentracing.Tracer instance) should call SetGlobalTracer as early as
// possible in main(), prior to calling the `StartSpan` global func below.
// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
// (etc) globals are noops.
func SetGlobalTracer(tracer Tracer) {
globalTracer = registeredTracer{tracer, true}
}
// GlobalTracer returns the global singleton `Tracer` implementation.
// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
// implementation that drops all data handed to it.
func GlobalTracer() Tracer {
return globalTracer.tracer
}
// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
func StartSpan(operationName string, opts ...StartSpanOption) Span {
return globalTracer.tracer.StartSpan(operationName, opts...)
}
// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
func InitGlobalTracer(tracer Tracer) {
SetGlobalTracer(tracer)
}
// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
func IsGlobalTracerRegistered() bool {
return globalTracer.isRegistered
}


@ -0,0 +1,60 @@
package opentracing
import "context"
type contextKey struct{}
var activeSpanKey = contextKey{}
// ContextWithSpan returns a new `context.Context` that holds a reference to
// `span`'s SpanContext.
func ContextWithSpan(ctx context.Context, span Span) context.Context {
return context.WithValue(ctx, activeSpanKey, span)
}
// SpanFromContext returns the `Span` previously associated with `ctx`, or
// `nil` if no such `Span` could be found.
//
// NOTE: context.Context != SpanContext: the former is Go's intra-process
// context propagation mechanism, and the latter houses OpenTracing's per-Span
// identity and baggage information.
func SpanFromContext(ctx context.Context) Span {
val := ctx.Value(activeSpanKey)
if sp, ok := val.(Span); ok {
return sp
}
return nil
}
// StartSpanFromContext starts and returns a Span with `operationName`, using
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
// found, StartSpanFromContext creates a root (parentless) Span.
//
// The second return value is a context.Context object built around the
// returned Span.
//
// Example usage:
//
// SomeFunction(ctx context.Context, ...) {
// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
// defer sp.Finish()
// ...
// }
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
}
// StartSpanFromContextWithTracer starts and returns a span with `operationName`
// using a span found within the context as a ChildOfRef. If that doesn't exist
// it creates a root span. It also returns a context.Context object built
// around the returned span.
//
// Its behavior is identical to StartSpanFromContext except that it takes an explicit
// tracer as opposed to using the global tracer.
func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
if parentSpan := SpanFromContext(ctx); parentSpan != nil {
opts = append(opts, ChildOf(parentSpan.Context()))
}
span := tracer.StartSpan(operationName, opts...)
return span, ContextWithSpan(ctx, span)
}


@ -0,0 +1,269 @@
package log
import (
"fmt"
"math"
)
type fieldType int
const (
stringType fieldType = iota
boolType
intType
int32Type
uint32Type
int64Type
uint64Type
float32Type
float64Type
errorType
objectType
lazyLoggerType
noopType
)
// Field instances are constructed via Bool, String, and so on.
// Tracing implementations may then handle them via the Field.Marshal
// method.
//
// "heavily influenced by" (i.e., partially stolen from)
// https://github.com/uber-go/zap
type Field struct {
key string
fieldType fieldType
numericVal int64
stringVal string
interfaceVal interface{}
}
// String adds a string-valued key:value pair to a Span.LogFields() record
func String(key, val string) Field {
return Field{
key: key,
fieldType: stringType,
stringVal: val,
}
}
// Bool adds a bool-valued key:value pair to a Span.LogFields() record
func Bool(key string, val bool) Field {
var numericVal int64
if val {
numericVal = 1
}
return Field{
key: key,
fieldType: boolType,
numericVal: numericVal,
}
}
// Int adds an int-valued key:value pair to a Span.LogFields() record
func Int(key string, val int) Field {
return Field{
key: key,
fieldType: intType,
numericVal: int64(val),
}
}
// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
func Int32(key string, val int32) Field {
return Field{
key: key,
fieldType: int32Type,
numericVal: int64(val),
}
}
// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
return Field{
key: key,
fieldType: int64Type,
numericVal: val,
}
}
// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
func Uint32(key string, val uint32) Field {
return Field{
key: key,
fieldType: uint32Type,
numericVal: int64(val),
}
}
// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
func Uint64(key string, val uint64) Field {
return Field{
key: key,
fieldType: uint64Type,
numericVal: int64(val),
}
}
// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
func Float32(key string, val float32) Field {
return Field{
key: key,
fieldType: float32Type,
numericVal: int64(math.Float32bits(val)),
}
}
// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
func Float64(key string, val float64) Field {
return Field{
key: key,
fieldType: float64Type,
numericVal: int64(math.Float64bits(val)),
}
}
// Error adds an error with the key "error" to a Span.LogFields() record
func Error(err error) Field {
return Field{
key: "error",
fieldType: errorType,
interfaceVal: err,
}
}
// Object adds an object-valued key:value pair to a Span.LogFields() record
func Object(key string, obj interface{}) Field {
return Field{
key: key,
fieldType: objectType,
interfaceVal: obj,
}
}
// LazyLogger allows for user-defined, late-bound logging of arbitrary data
type LazyLogger func(fv Encoder)
// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
// implementation will call the LazyLogger function at an indefinite time in
// the future (after Lazy() returns).
func Lazy(ll LazyLogger) Field {
return Field{
fieldType: lazyLoggerType,
interfaceVal: ll,
}
}
// Noop creates a no-op log field that should be ignored by the tracer.
// It can be used to capture optional fields, for example those that should
// only be logged in non-production environment:
//
// func customerField(order *Order) log.Field {
// if os.Getenv("ENVIRONMENT") == "dev" {
// return log.String("customer", order.Customer.ID)
// }
// return log.Noop()
// }
//
// span.LogFields(log.String("event", "purchase"), customerField(order))
//
func Noop() Field {
return Field{
fieldType: noopType,
}
}
// Encoder allows access to the contents of a Field (via a call to
// Field.Marshal).
//
// Tracer implementations typically provide an implementation of Encoder;
// OpenTracing callers typically do not need to concern themselves with it.
type Encoder interface {
EmitString(key, value string)
EmitBool(key string, value bool)
EmitInt(key string, value int)
EmitInt32(key string, value int32)
EmitInt64(key string, value int64)
EmitUint32(key string, value uint32)
EmitUint64(key string, value uint64)
EmitFloat32(key string, value float32)
EmitFloat64(key string, value float64)
EmitObject(key string, value interface{})
EmitLazyLogger(value LazyLogger)
}
// Marshal passes a Field instance through to the appropriate
// field-type-specific method of an Encoder.
func (lf Field) Marshal(visitor Encoder) {
switch lf.fieldType {
case stringType:
visitor.EmitString(lf.key, lf.stringVal)
case boolType:
visitor.EmitBool(lf.key, lf.numericVal != 0)
case intType:
visitor.EmitInt(lf.key, int(lf.numericVal))
case int32Type:
visitor.EmitInt32(lf.key, int32(lf.numericVal))
case int64Type:
visitor.EmitInt64(lf.key, int64(lf.numericVal))
case uint32Type:
visitor.EmitUint32(lf.key, uint32(lf.numericVal))
case uint64Type:
visitor.EmitUint64(lf.key, uint64(lf.numericVal))
case float32Type:
visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
case float64Type:
visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
case errorType:
if err, ok := lf.interfaceVal.(error); ok {
visitor.EmitString(lf.key, err.Error())
} else {
visitor.EmitString(lf.key, "<nil>")
}
case objectType:
visitor.EmitObject(lf.key, lf.interfaceVal)
case lazyLoggerType:
visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
case noopType:
// intentionally left blank
}
}
// Key returns the field's key.
func (lf Field) Key() string {
return lf.key
}
// Value returns the field's value as interface{}.
func (lf Field) Value() interface{} {
switch lf.fieldType {
case stringType:
return lf.stringVal
case boolType:
return lf.numericVal != 0
case intType:
return int(lf.numericVal)
case int32Type:
return int32(lf.numericVal)
case int64Type:
return int64(lf.numericVal)
case uint32Type:
return uint32(lf.numericVal)
case uint64Type:
return uint64(lf.numericVal)
case float32Type:
return math.Float32frombits(uint32(lf.numericVal))
case float64Type:
return math.Float64frombits(uint64(lf.numericVal))
case errorType, objectType, lazyLoggerType:
return lf.interfaceVal
case noopType:
return nil
default:
return nil
}
}
// String returns a string representation of the key and value.
func (lf Field) String() string {
return fmt.Sprint(lf.key, ":", lf.Value())
}


@ -0,0 +1,54 @@
package log
import "fmt"
// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
// a la Span.LogFields().
func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
if len(keyValues)%2 != 0 {
return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
}
fields := make([]Field, len(keyValues)/2)
for i := 0; i*2 < len(keyValues); i++ {
key, ok := keyValues[i*2].(string)
if !ok {
return nil, fmt.Errorf(
"non-string key (pair #%d): %T",
i, keyValues[i*2])
}
switch typedVal := keyValues[i*2+1].(type) {
case bool:
fields[i] = Bool(key, typedVal)
case string:
fields[i] = String(key, typedVal)
case int:
fields[i] = Int(key, typedVal)
case int8:
fields[i] = Int32(key, int32(typedVal))
case int16:
fields[i] = Int32(key, int32(typedVal))
case int32:
fields[i] = Int32(key, typedVal)
case int64:
fields[i] = Int64(key, typedVal)
case uint:
fields[i] = Uint64(key, uint64(typedVal))
case uint64:
fields[i] = Uint64(key, typedVal)
case uint8:
fields[i] = Uint32(key, uint32(typedVal))
case uint16:
fields[i] = Uint32(key, uint32(typedVal))
case uint32:
fields[i] = Uint32(key, typedVal)
case float32:
fields[i] = Float32(key, typedVal)
case float64:
fields[i] = Float64(key, typedVal)
default:
// When in doubt, coerce to a string
fields[i] = String(key, fmt.Sprint(typedVal))
}
}
return fields, nil
}

vendor/github.com/opentracing/opentracing-go/noop.go generated vendored Normal file

@ -0,0 +1,64 @@
package opentracing
import "github.com/opentracing/opentracing-go/log"
// A NoopTracer is a trivial, minimum overhead implementation of Tracer
// for which all operations are no-ops.
//
// The primary use of this implementation is in libraries, such as RPC
// frameworks, that make tracing an optional feature controlled by the
// end user. A no-op implementation allows said libraries to use it
// as the default Tracer and to write instrumentation that does
// not need to keep checking if the tracer instance is nil.
//
// For the same reason, the NoopTracer is the default "global" tracer
// (see GlobalTracer and SetGlobalTracer functions).
//
// WARNING: NoopTracer does not support baggage propagation.
type NoopTracer struct{}
type noopSpan struct{}
type noopSpanContext struct{}
var (
defaultNoopSpanContext = noopSpanContext{}
defaultNoopSpan = noopSpan{}
defaultNoopTracer = NoopTracer{}
)
const (
emptyString = ""
)
// noopSpanContext:
func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
// noopSpan:
func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
func (n noopSpan) BaggageItem(key string) string { return emptyString }
func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
func (n noopSpan) LogFields(fields ...log.Field) {}
func (n noopSpan) LogKV(keyVals ...interface{}) {}
func (n noopSpan) Finish() {}
func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
func (n noopSpan) SetOperationName(operationName string) Span { return n }
func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
func (n noopSpan) LogEvent(event string) {}
func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
func (n noopSpan) Log(data LogData) {}
// StartSpan belongs to the Tracer interface.
func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
return defaultNoopSpan
}
// Inject belongs to the Tracer interface.
func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
return nil
}
// Extract belongs to the Tracer interface.
func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
return nil, ErrSpanContextNotFound
}


@ -0,0 +1,176 @@
package opentracing
import (
"errors"
"net/http"
)
///////////////////////////////////////////////////////////////////////////////
// CORE PROPAGATION INTERFACES:
///////////////////////////////////////////////////////////////////////////////
var (
// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
// Tracer.Extract() is not recognized by the Tracer implementation.
ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
// ErrSpanContextNotFound occurs when the `carrier` passed to
// Tracer.Extract() is valid and uncorrupted but has insufficient
// information to extract a SpanContext.
ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
// operate on a SpanContext which it is not prepared to handle (for
// example, since it was created by a different tracer implementation).
ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
// implementations expect a different type of `carrier` than they are
// given.
ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
// ErrSpanContextCorrupted occurs when the `carrier` passed to
// Tracer.Extract() is of the expected type but is corrupted.
ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
)
///////////////////////////////////////////////////////////////////////////////
// BUILTIN PROPAGATION FORMATS:
///////////////////////////////////////////////////////////////////////////////
// BuiltinFormat is used to demarcate the values within package `opentracing`
// that are intended for use with the Tracer.Inject() and Tracer.Extract()
// methods.
type BuiltinFormat byte
const (
// Binary represents SpanContexts as opaque binary data.
//
// For Tracer.Inject(): the carrier must be an `io.Writer`.
//
// For Tracer.Extract(): the carrier must be an `io.Reader`.
Binary BuiltinFormat = iota
// TextMap represents SpanContexts as key:value string pairs.
//
// Unlike HTTPHeaders, the TextMap format does not restrict the key or
// value character sets in any way.
//
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
//
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
TextMap
// HTTPHeaders represents SpanContexts as HTTP header string pairs.
//
// Unlike TextMap, the HTTPHeaders format requires that the keys and values
// be valid as HTTP headers as-is (i.e., character casing may be unstable
// and special characters are disallowed in keys, values should be
// URL-escaped, etc).
//
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
//
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
//
// See HTTPHeadersCarrier for an implementation of both TextMapWriter
// and TextMapReader that defers to an http.Header instance for storage.
// For example, Inject():
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := span.Tracer().Inject(
// span.Context(), opentracing.HTTPHeaders, carrier)
//
// Or Extract():
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// clientContext, err := tracer.Extract(
// opentracing.HTTPHeaders, carrier)
//
HTTPHeaders
)
// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
// it, the caller can encode a SpanContext for propagation as entries in a map
// of unicode strings.
type TextMapWriter interface {
// Set a key:value pair to the carrier. Multiple calls to Set() for the
// same key leads to undefined behavior.
//
// NOTE: The backing store for the TextMapWriter may contain data unrelated
// to SpanContext. As such, Inject() and Extract() implementations that
// call the TextMapWriter and TextMapReader interfaces must agree on a
// prefix or other convention to distinguish their own key:value pairs.
Set(key, val string)
}
// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
// the caller can decode a propagated SpanContext as entries in a map of
// unicode strings.
type TextMapReader interface {
// ForeachKey returns TextMap contents via repeated calls to the `handler`
// function. If any call to `handler` returns a non-nil error, ForeachKey
// terminates and returns that error.
//
// NOTE: The backing store for the TextMapReader may contain data unrelated
// to SpanContext. As such, Inject() and Extract() implementations that
// call the TextMapWriter and TextMapReader interfaces must agree on a
// prefix or other convention to distinguish their own key:value pairs.
//
// The "foreach" callback pattern reduces unnecessary copying in some cases
// and also allows implementations to hold locks while the map is read.
ForeachKey(handler func(key, val string) error) error
}
// TextMapCarrier allows the use of regular map[string]string
// as both TextMapWriter and TextMapReader.
type TextMapCarrier map[string]string
// ForeachKey conforms to the TextMapReader interface.
func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
for k, v := range c {
if err := handler(k, v); err != nil {
return err
}
}
return nil
}
// Set implements Set() of opentracing.TextMapWriter
func (c TextMapCarrier) Set(key, val string) {
c[key] = val
}
// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
//
// Example usage for server side:
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
//
// Example usage for client side:
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := tracer.Inject(
// span.Context(),
// opentracing.HTTPHeaders,
// carrier)
//
type HTTPHeadersCarrier http.Header
// Set conforms to the TextMapWriter interface.
func (c HTTPHeadersCarrier) Set(key, val string) {
h := http.Header(c)
h.Set(key, val)
}
// ForeachKey conforms to the TextMapReader interface.
func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
for k, vals := range c {
for _, v := range vals {
if err := handler(k, v); err != nil {
return err
}
}
}
return nil
}

vendor/github.com/opentracing/opentracing-go/span.go generated vendored Normal file

@ -0,0 +1,189 @@
package opentracing
import (
"time"
"github.com/opentracing/opentracing-go/log"
)
// SpanContext represents Span state that must propagate to descendant Spans and across process
// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
type SpanContext interface {
// ForeachBaggageItem grants access to all baggage items stored in the
// SpanContext.
// The handler function will be called for each baggage key/value pair.
// The ordering of items is not guaranteed.
//
// The bool return value indicates if the handler wants to continue iterating
// through the rest of the baggage items; for example if the handler is trying to
// find some baggage item by pattern matching the name, it can return false
// as soon as the item is found to stop further iterations.
ForeachBaggageItem(handler func(k, v string) bool)
}
// Span represents an active, un-finished span in the OpenTracing system.
//
// Spans are created by the Tracer interface.
type Span interface {
// Sets the end timestamp and finalizes Span state.
//
// With the exception of calls to Context() (which are always allowed),
// Finish() must be the last call made to any span instance, and to do
// otherwise leads to undefined behavior.
Finish()
// FinishWithOptions is like Finish() but with explicit control over
// timestamps and log data.
FinishWithOptions(opts FinishOptions)
// Context() yields the SpanContext for this Span. Note that the return
// value of Context() is still valid after a call to Span.Finish(), as is
// a call to Span.Context() after a call to Span.Finish().
Context() SpanContext
// Sets or changes the operation name.
//
// Returns a reference to this Span for chaining.
SetOperationName(operationName string) Span
// Adds a tag to the span.
//
// If there is a pre-existing tag set for `key`, it is overwritten.
//
// Tag values can be numeric types, strings, or bools. The behavior of
// other tag value types is undefined at the OpenTracing level. If a
// tracing system does not know how to handle a particular value type, it
// may ignore the tag, but shall not panic.
//
// Returns a reference to this Span for chaining.
SetTag(key string, value interface{}) Span
// LogFields is an efficient and type-checked way to record key:value
// logging data about a Span, though the programming interface is a little
// more verbose than LogKV(). Here's an example:
//
// span.LogFields(
// log.String("event", "soft error"),
// log.String("type", "cache timeout"),
// log.Int("waited.millis", 1500))
//
// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
LogFields(fields ...log.Field)
// LogKV is a concise, readable way to record key:value logging data about
// a Span, though unfortunately this also makes it less efficient and less
// type-safe than LogFields(). Here's an example:
//
// span.LogKV(
// "event", "soft error",
// "type", "cache timeout",
// "waited.millis", 1500)
//
// For LogKV (as opposed to LogFields()), the parameters must appear as
// key-value pairs, like
//
// span.LogKV(key1, val1, key2, val2, key3, val3, ...)
//
// The keys must all be strings. The values may be strings, numeric types,
// bools, Go error instances, or arbitrary structs.
//
// (Note to implementors: consider the log.InterleavedKVToFields() helper)
LogKV(alternatingKeyValues ...interface{})
// SetBaggageItem sets a key:value pair on this Span and its SpanContext
// that also propagates to descendants of this Span.
//
// SetBaggageItem() enables powerful functionality given a full-stack
// opentracing integration (e.g., arbitrary application data from a mobile
// app can make it, transparently, all the way into the depths of a storage
// system), and with it some powerful costs: use this feature with care.
//
// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
// *future* causal descendants of the associated Span.
//
// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
// value is copied into every local *and remote* child of the associated
// Span, and that can add up to a lot of network and cpu overhead.
//
// Returns a reference to this Span for chaining.
SetBaggageItem(restrictedKey, value string) Span
// Gets the value for a baggage item given its key. Returns the empty string
// if the value isn't found in this Span.
BaggageItem(restrictedKey string) string
// Provides access to the Tracer that created this Span.
Tracer() Tracer
// Deprecated: use LogFields or LogKV
LogEvent(event string)
// Deprecated: use LogFields or LogKV
LogEventWithPayload(event string, payload interface{})
// Deprecated: use LogFields or LogKV
Log(data LogData)
}
// LogRecord is data associated with a single Span log. Every LogRecord
// instance must specify at least one Field.
type LogRecord struct {
Timestamp time.Time
Fields []log.Field
}
// FinishOptions allows Span.FinishWithOptions callers to override the finish
// timestamp and provide log data via a bulk interface.
type FinishOptions struct {
// FinishTime overrides the Span's finish time, or implicitly becomes
// time.Now() if FinishTime.IsZero().
//
// FinishTime must resolve to a timestamp that's >= the Span's StartTime
// (per StartSpanOptions).
FinishTime time.Time
// LogRecords allows the caller to specify the contents of many LogFields()
// calls with a single slice. May be nil.
//
// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
// be set explicitly). Also, they must be >= the Span's start timestamp and
// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
// behavior of FinishWithOptions() is undefined.
//
// If specified, the caller hands off ownership of LogRecords at
// FinishWithOptions() invocation time.
//
// If specified, the (deprecated) BulkLogData must be nil or empty.
LogRecords []LogRecord
// BulkLogData is DEPRECATED.
BulkLogData []LogData
}
// LogData is DEPRECATED
type LogData struct {
Timestamp time.Time
Event string
Payload interface{}
}
// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
func (ld *LogData) ToLogRecord() LogRecord {
var literalTimestamp time.Time
if ld.Timestamp.IsZero() {
literalTimestamp = time.Now()
} else {
literalTimestamp = ld.Timestamp
}
rval := LogRecord{
Timestamp: literalTimestamp,
}
if ld.Payload == nil {
rval.Fields = []log.Field{
log.String("event", ld.Event),
}
} else {
rval.Fields = []log.Field{
log.String("event", ld.Event),
log.Object("payload", ld.Payload),
}
}
return rval
}

vendor/github.com/opentracing/opentracing-go/tracer.go generated vendored Normal file

@ -0,0 +1,304 @@
package opentracing
import "time"
// Tracer is a simple, thin interface for Span creation and SpanContext
// propagation.
type Tracer interface {
// Create, start, and return a new Span with the given `operationName` and
// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
// from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
//
// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
// opentracing.FollowsFrom()) becomes the root of its own trace.
//
// Examples:
//
// var tracer opentracing.Tracer = ...
//
// // The root-span case:
// sp := tracer.StartSpan("GetFeed")
//
// // The vanilla child span case:
// sp := tracer.StartSpan(
// "GetFeed",
// opentracing.ChildOf(parentSpan.Context()))
//
// // All the bells and whistles:
// sp := tracer.StartSpan(
// "GetFeed",
// opentracing.ChildOf(parentSpan.Context()),
// opentracing.Tag{"user_agent", loggedReq.UserAgent},
// opentracing.StartTime(loggedReq.Timestamp),
// )
//
StartSpan(operationName string, opts ...StartSpanOption) Span
// Inject() takes the `sm` SpanContext instance and injects it for
// propagation within `carrier`. The actual type of `carrier` depends on
// the value of `format`.
//
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
// and each has an expected carrier type.
//
// Other packages may declare their own `format` values, much like the keys
// used by `context.Context` (see https://godoc.org/context#WithValue).
//
// Example usage (sans error handling):
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := tracer.Inject(
// span.Context(),
// opentracing.HTTPHeaders,
// carrier)
//
// NOTE: All opentracing.Tracer implementations MUST support all
// BuiltinFormats.
//
// Implementations may return opentracing.ErrUnsupportedFormat if `format`
// is not supported by (or not known by) the implementation.
//
// Implementations may return opentracing.ErrInvalidCarrier or any other
// implementation-specific error if the format is supported but injection
// fails anyway.
//
// See Tracer.Extract().
Inject(sm SpanContext, format interface{}, carrier interface{}) error
// Extract() returns a SpanContext instance given `format` and `carrier`.
//
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
// and each has an expected carrier type.
//
// Other packages may declare their own `format` values, much like the keys
// used by `context.Context` (see
// https://godoc.org/golang.org/x/net/context#WithValue).
//
// Example usage (with StartSpan):
//
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
//
// // ... assuming the ultimate goal here is to resume the trace with a
// // server-side Span:
// var serverSpan opentracing.Span
// if err == nil {
// serverSpan = tracer.StartSpan(
// rpcMethodName, ext.RPCServerOption(clientContext))
// } else {
// serverSpan = tracer.StartSpan(rpcMethodName)
// }
//
//
// NOTE: All opentracing.Tracer implementations MUST support all
// BuiltinFormats.
//
// Return values:
// - A successful Extract returns a SpanContext instance and a nil error
// - If there was simply no SpanContext to extract in `carrier`, Extract()
// returns (nil, opentracing.ErrSpanContextNotFound)
// - If `format` is unsupported or unrecognized, Extract() returns (nil,
// opentracing.ErrUnsupportedFormat)
// - If there are more fundamental problems with the `carrier` object,
// Extract() may return opentracing.ErrInvalidCarrier,
// opentracing.ErrSpanContextCorrupted, or implementation-specific
// errors.
//
// See Tracer.Inject().
Extract(format interface{}, carrier interface{}) (SpanContext, error)
}
// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
// mechanism to override the start timestamp, specify Span References, and make
// a single Tag or multiple Tags available at Span start time.
//
// StartSpan() callers should look at the StartSpanOption interface and
// implementations available in this package.
//
// Tracer implementations can convert a slice of `StartSpanOption` instances
// into a `StartSpanOptions` struct like so:
//
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
// sso := opentracing.StartSpanOptions{}
// for _, o := range opts {
// o.Apply(&sso)
// }
// ...
// }
//
type StartSpanOptions struct {
// Zero or more causal references to other Spans (via their SpanContext).
// If empty, start a "root" Span (i.e., start a new trace).
References []SpanReference
// StartTime overrides the Span's start time, or implicitly becomes
// time.Now() if StartTime.IsZero().
StartTime time.Time
// Tags may have zero or more entries; the restrictions on map values are
// identical to those for Span.SetTag(). May be nil.
//
// If specified, the caller hands off ownership of Tags at
// StartSpan() invocation time.
Tags map[string]interface{}
}
// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
//
// StartSpanOption borrows from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type StartSpanOption interface {
Apply(*StartSpanOptions)
}
// SpanReferenceType is an enum type describing different categories of
// relationships between two Spans. If Span-2 refers to Span-1, the
// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
// ChildOfRef means that Span-1 created Span-2.
//
// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
// or Span-2 may be sitting in a distributed queue behind Span-1.
type SpanReferenceType int
const (
// ChildOfRef refers to a parent Span that caused *and* somehow depends
// upon the new child Span. Often (but not always), the parent Span cannot
// finish until the child Span does.
//
// A timing diagram for a ChildOfRef that's blocked on the new Span:
//
// [-Parent Span---------]
// [-Child Span----]
//
// See http://opentracing.io/spec/
//
// See opentracing.ChildOf()
ChildOfRef SpanReferenceType = iota
// FollowsFromRef refers to a parent Span that does not depend in any way
// on the result of the new child Span. For instance, one might use
// FollowsFromRefs to describe pipeline stages separated by queues,
// or a fire-and-forget cache insert at the tail end of a web request.
//
// A FollowsFromRef Span is part of the same logical trace as the new Span:
// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
//
// All of the following could be valid timing diagrams for children that
// "FollowFrom" a parent.
//
// [-Parent Span-] [-Child Span-]
//
//
// [-Parent Span--]
// [-Child Span-]
//
//
// [-Parent Span-]
// [-Child Span-]
//
// See http://opentracing.io/spec/
//
// See opentracing.FollowsFrom()
FollowsFromRef
)
// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
// referenced SpanContext. See the SpanReferenceType documentation for
// supported relationships. If SpanReference is created with
// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
// syntax for starting spans:
//
// sc, _ := tracer.Extract(someFormat, someCarrier)
// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
//
// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
// not add the parent span reference to the options.
type SpanReference struct {
Type SpanReferenceType
ReferencedContext SpanContext
}
// Apply satisfies the StartSpanOption interface.
func (r SpanReference) Apply(o *StartSpanOptions) {
if r.ReferencedContext != nil {
o.References = append(o.References, r)
}
}
// ChildOf returns a StartSpanOption pointing to a dependent parent span.
// If sc == nil, the option has no effect.
//
// See ChildOfRef, SpanReference
func ChildOf(sc SpanContext) SpanReference {
return SpanReference{
Type: ChildOfRef,
ReferencedContext: sc,
}
}
// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
// the child Span but does not directly depend on its result in any way.
// If sc == nil, the option has no effect.
//
// See FollowsFromRef, SpanReference
func FollowsFrom(sc SpanContext) SpanReference {
return SpanReference{
Type: FollowsFromRef,
ReferencedContext: sc,
}
}
// StartTime is a StartSpanOption that sets an explicit start timestamp for the
// new Span.
type StartTime time.Time
// Apply satisfies the StartSpanOption interface.
func (t StartTime) Apply(o *StartSpanOptions) {
o.StartTime = time.Time(t)
}
// Tags are a generic map from an arbitrary string key to an opaque value type.
// The underlying tracing system is responsible for interpreting and
// serializing the values.
type Tags map[string]interface{}
// Apply satisfies the StartSpanOption interface.
func (t Tags) Apply(o *StartSpanOptions) {
if o.Tags == nil {
o.Tags = make(map[string]interface{})
}
for k, v := range t {
o.Tags[k] = v
}
}
// Tag may be passed as a StartSpanOption to add a tag to new spans,
// or its Set method may be used to apply the tag to an existing Span,
// for example:
//
// tracer.StartSpan("opName", Tag{"Key", value})
//
// or
//
// Tag{"key", value}.Set(span)
type Tag struct {
Key string
Value interface{}
}
// Apply satisfies the StartSpanOption interface.
func (t Tag) Apply(o *StartSpanOptions) {
if o.Tags == nil {
o.Tags = make(map[string]interface{})
}
o.Tags[t.Key] = t.Value
}
// Set applies the tag to an existing Span.
func (t Tag) Set(s Span) {
s.SetTag(t.Key, t.Value)
}

vendor/golang.org/x/net/internal/iana/gen.go generated vendored Normal file

@ -0,0 +1,383 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
// This program generates internet protocol constants and tables by
// reading IANA protocol registries.
package main
import (
"bytes"
"encoding/xml"
"fmt"
"go/format"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
)
var registries = []struct {
url string
parse func(io.Writer, io.Reader) error
}{
{
"https://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
parseDSCPRegistry,
},
{
"https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
parseProtocolNumbers,
},
{
"https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml",
parseAddrFamilyNumbers,
},
}
func main() {
var bb bytes.Buffer
fmt.Fprintf(&bb, "// go generate gen.go\n")
fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
for _, r := range registries {
resp, err := http.Get(r.url)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
os.Exit(1)
}
if err := r.parse(&bb, resp.Body); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
fmt.Fprintf(&bb, "\n")
}
b, err := format.Source(bb.Bytes())
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func parseDSCPRegistry(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var dr dscpRegistry
if err := dec.Decode(&dr); err != nil {
return err
}
fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
fmt.Fprintf(w, "const (\n")
for _, dr := range dr.escapeDSCP() {
fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value)
fmt.Fprintf(w, "// %s\n", dr.OrigName)
}
for _, er := range dr.escapeECN() {
fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value)
fmt.Fprintf(w, "// %s\n", er.OrigDescr)
}
fmt.Fprintf(w, ")\n")
return nil
}
type dscpRegistry struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
Note string `xml:"note"`
Registries []struct {
Title string `xml:"title"`
Registries []struct {
Title string `xml:"title"`
Records []struct {
Name string `xml:"name"`
Space string `xml:"space"`
} `xml:"record"`
} `xml:"registry"`
Records []struct {
Value string `xml:"value"`
Descr string `xml:"description"`
} `xml:"record"`
} `xml:"registry"`
}
type canonDSCPRecord struct {
OrigName string
Name string
Value int
}
func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord {
var drs []canonDSCPRecord
for _, preg := range drr.Registries {
if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") {
continue
}
for _, reg := range preg.Registries {
if !strings.Contains(reg.Title, "Pool 1 Codepoints") {
continue
}
drs = make([]canonDSCPRecord, len(reg.Records))
sr := strings.NewReplacer(
"+", "",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, dr := range reg.Records {
s := strings.TrimSpace(dr.Name)
drs[i].OrigName = s
drs[i].Name = sr.Replace(s)
n, err := strconv.ParseUint(dr.Space, 2, 8)
if err != nil {
continue
}
drs[i].Value = int(n) << 2
}
}
}
return drs
}
type canonECNRecord struct {
OrigDescr string
Descr string
Value int
}
func (drr *dscpRegistry) escapeECN() []canonECNRecord {
var ers []canonECNRecord
for _, reg := range drr.Registries {
if !strings.Contains(reg.Title, "ECN Field") {
continue
}
ers = make([]canonECNRecord, len(reg.Records))
sr := strings.NewReplacer(
"Capable", "",
"Not-ECT", "",
"ECT(1)", "",
"ECT(0)", "",
"CE", "",
"(", "",
")", "",
"+", "",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, er := range reg.Records {
s := strings.TrimSpace(er.Descr)
ers[i].OrigDescr = s
ss := strings.Split(s, " ")
if len(ss) > 1 {
ers[i].Descr = strings.Join(ss[1:], " ")
} else {
ers[i].Descr = ss[0]
}
ers[i].Descr = sr.Replace(er.Descr)
n, err := strconv.ParseUint(er.Value, 2, 8)
if err != nil {
continue
}
ers[i].Value = int(n)
}
}
return ers
}
func parseProtocolNumbers(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var pn protocolNumbers
if err := dec.Decode(&pn); err != nil {
return err
}
prs := pn.escape()
prs = append([]canonProtocolRecord{{
Name: "IP",
Descr: "IPv4 encapsulation, pseudo protocol number",
Value: 0,
}}, prs...)
fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
fmt.Fprintf(w, "const (\n")
for _, pr := range prs {
if pr.Name == "" {
continue
}
fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
s := pr.Descr
if s == "" {
s = pr.OrigName
}
fmt.Fprintf(w, "// %s\n", s)
}
fmt.Fprintf(w, ")\n")
return nil
}
type protocolNumbers struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
RegTitle string `xml:"registry>title"`
Note string `xml:"registry>note"`
Records []struct {
Value string `xml:"value"`
Name string `xml:"name"`
Descr string `xml:"description"`
} `xml:"registry>record"`
}
type canonProtocolRecord struct {
OrigName string
Name string
Descr string
Value int
}
func (pn *protocolNumbers) escape() []canonProtocolRecord {
prs := make([]canonProtocolRecord, len(pn.Records))
sr := strings.NewReplacer(
"-in-", "in",
"-within-", "within",
"-over-", "over",
"+", "P",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, pr := range pn.Records {
if strings.Contains(pr.Name, "Deprecated") ||
strings.Contains(pr.Name, "deprecated") {
continue
}
prs[i].OrigName = pr.Name
s := strings.TrimSpace(pr.Name)
switch pr.Name {
case "ISIS over IPv4":
prs[i].Name = "ISIS"
case "manet":
prs[i].Name = "MANET"
default:
prs[i].Name = sr.Replace(s)
}
ss := strings.Split(pr.Descr, "\n")
for i := range ss {
ss[i] = strings.TrimSpace(ss[i])
}
if len(ss) > 1 {
prs[i].Descr = strings.Join(ss, " ")
} else {
prs[i].Descr = ss[0]
}
prs[i].Value, _ = strconv.Atoi(pr.Value)
}
return prs
}
func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var afn addrFamilylNumbers
if err := dec.Decode(&afn); err != nil {
return err
}
afrs := afn.escape()
fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated)
fmt.Fprintf(w, "const (\n")
for _, afr := range afrs {
if afr.Name == "" {
continue
}
fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value)
fmt.Fprintf(w, "// %s\n", afr.Descr)
}
fmt.Fprintf(w, ")\n")
return nil
}
type addrFamilylNumbers struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
RegTitle string `xml:"registry>title"`
Note string `xml:"registry>note"`
Records []struct {
Value string `xml:"value"`
Descr string `xml:"description"`
} `xml:"registry>record"`
}
type canonAddrFamilyRecord struct {
Name string
Descr string
Value int
}
func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord {
afrs := make([]canonAddrFamilyRecord, len(afn.Records))
sr := strings.NewReplacer(
"IP version 4", "IPv4",
"IP version 6", "IPv6",
"Identifier", "ID",
"-", "",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, afr := range afn.Records {
if strings.Contains(afr.Descr, "Unassigned") ||
strings.Contains(afr.Descr, "Reserved") {
continue
}
afrs[i].Descr = afr.Descr
s := strings.TrimSpace(afr.Descr)
switch s {
case "IP (IP version 4)":
afrs[i].Name = "IPv4"
case "IP6 (IP version 6)":
afrs[i].Name = "IPv6"
case "AFI for L2VPN information":
afrs[i].Name = "L2VPN"
case "E.164 with NSAP format subaddress":
afrs[i].Name = "E164withSubaddress"
case "MT IP: Multi-Topology IP version 4":
afrs[i].Name = "MTIPv4"
case "MAC/24":
afrs[i].Name = "MACFinal24bits"
case "MAC/40":
afrs[i].Name = "MACFinal40bits"
case "IPv6/64":
afrs[i].Name = "IPv6Initial64bits"
default:
n := strings.Index(s, "(")
if n > 0 {
s = s[:n]
}
n = strings.Index(s, ":")
if n > 0 {
s = s[:n]
}
afrs[i].Name = sr.Replace(s)
}
afrs[i].Value, _ = strconv.Atoi(afr.Value)
}
return afrs
}

vendor/golang.org/x/net/internal/socket/defs_aix.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type mmsghdr C.struct_mmsghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)

vendor/golang.org/x/net/internal/socket/defs_darwin.go generated vendored Normal file

@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)


@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)


@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)

vendor/golang.org/x/net/internal/socket/defs_linux.go generated vendored Normal file

@ -0,0 +1,40 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <linux/in.h>
#include <linux/in6.h>
#define _GNU_SOURCE
#include <sys/socket.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type mmsghdr C.struct_mmsghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)

vendor/golang.org/x/net/internal/socket/defs_netbsd.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type mmsghdr C.struct_mmsghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)


@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)


@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package socket
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
type iovec C.struct_iovec
type msghdr C.struct_msghdr
type cmsghdr C.struct_cmsghdr
type sockaddrInet C.struct_sockaddr_in
type sockaddrInet6 C.struct_sockaddr_in6
const (
sizeofIovec = C.sizeof_struct_iovec
sizeofMsghdr = C.sizeof_struct_msghdr
sizeofCmsghdr = C.sizeof_struct_cmsghdr
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
)

vendor/golang.org/x/net/ipv4/defs_aix.go generated vendored Normal file

@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
// IP_RECVIF is defined on AIX but doesn't work.
// IP_RECVINTERFACE must be used instead.
sysIP_RECVIF = C.IP_RECVINTERFACE
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sizeofIPMreq = C.sizeof_struct_ip_mreq
)
type ipMreq C.struct_ip_mreq

vendor/golang.org/x/net/ipv4/defs_darwin.go generated vendored Normal file

@ -0,0 +1,77 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_STRIPHDR = C.IP_STRIPHDR
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_BOUND_IF = C.IP_BOUND_IF
sysIP_PKTINFO = C.IP_PKTINFO
sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
sizeofIPMreq = C.sizeof_struct_ip_mreq
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet C.struct_sockaddr_in
type inetPktinfo C.struct_in_pktinfo
type ipMreq C.struct_ip_mreq
type ipMreqn C.struct_ip_mreqn
type ipMreqSource C.struct_ip_mreq_source
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req

vendor/golang.org/x/net/ipv4/defs_dragonfly.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sizeofIPMreq = C.sizeof_struct_ip_mreq
)
type ipMreq C.struct_ip_mreq

75
vendor/golang.org/x/net/ipv4/defs_freebsd.go generated vendored Normal file

@ -0,0 +1,75 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_SENDSRCADDR = C.IP_SENDSRCADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_ONESBCAST = C.IP_ONESBCAST
sysIP_BINDANY = C.IP_BINDANY
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MINTTL = C.IP_MINTTL
sysIP_DONTFRAG = C.IP_DONTFRAG
sysIP_RECVTOS = C.IP_RECVTOS
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofIPMreq = C.sizeof_struct_ip_mreq
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet C.struct_sockaddr_in
type ipMreq C.struct_ip_mreq
type ipMreqn C.struct_ip_mreqn
type ipMreqSource C.struct_ip_mreq_source
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req

122
vendor/golang.org/x/net/ipv4/defs_linux.go generated vendored Normal file

@ -0,0 +1,122 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <time.h>
#include <linux/errqueue.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/filter.h>
#include <sys/socket.h>
*/
import "C"
const (
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_PKTINFO = C.IP_PKTINFO
sysIP_PKTOPTIONS = C.IP_PKTOPTIONS
sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER
sysIP_RECVERR = C.IP_RECVERR
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_RECVTOS = C.IP_RECVTOS
sysIP_MTU = C.IP_MTU
sysIP_FREEBIND = C.IP_FREEBIND
sysIP_TRANSPARENT = C.IP_TRANSPARENT
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR
sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR
sysIP_MINTTL = C.IP_MINTTL
sysIP_NODEFRAG = C.IP_NODEFRAG
sysIP_UNICAST_IF = C.IP_UNICAST_IF
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
sysIP_MSFILTER = C.IP_MSFILTER
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysMCAST_MSFILTER = C.MCAST_MSFILTER
sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL
//sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT
//sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT
//sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO
//sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE
//sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE
//sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT
sysICMP_FILTER = C.ICMP_FILTER
sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE
sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL
sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP
sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6
sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS
sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING
sysSOL_SOCKET = C.SOL_SOCKET
sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err
sizeofIPMreq = C.sizeof_struct_ip_mreq
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPFilter = C.sizeof_struct_icmp_filter
sizeofSockFprog = C.sizeof_struct_sock_fprog
)
type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
type sockaddrInet C.struct_sockaddr_in
type inetPktinfo C.struct_in_pktinfo
type sockExtendedErr C.struct_sock_extended_err
type ipMreq C.struct_ip_mreq
type ipMreqn C.struct_ip_mreqn
type ipMreqSource C.struct_ip_mreq_source
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req
type icmpFilter C.struct_icmp_filter
type sockFProg C.struct_sock_fprog
type sockFilter C.struct_sock_filter

37
vendor/golang.org/x/net/ipv4/defs_netbsd.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sizeofIPMreq = C.sizeof_struct_ip_mreq
)
type ipMreq C.struct_ip_mreq

37
vendor/golang.org/x/net/ipv4/defs_openbsd.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sizeofIPMreq = C.sizeof_struct_ip_mreq
)
type ipMreq C.struct_ip_mreq

84
vendor/golang.org/x/net/ipv4/defs_solaris.go generated vendored Normal file

@ -0,0 +1,84 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
package ipv4
/*
#include <sys/socket.h>
#include <netinet/in.h>
*/
import "C"
const (
sysIP_OPTIONS = C.IP_OPTIONS
sysIP_HDRINCL = C.IP_HDRINCL
sysIP_TOS = C.IP_TOS
sysIP_TTL = C.IP_TTL
sysIP_RECVOPTS = C.IP_RECVOPTS
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
sysIP_RETOPTS = C.IP_RETOPTS
sysIP_RECVIF = C.IP_RECVIF
sysIP_RECVSLLA = C.IP_RECVSLLA
sysIP_RECVTTL = C.IP_RECVTTL
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
sysIP_NEXTHOP = C.IP_NEXTHOP
sysIP_PKTINFO = C.IP_PKTINFO
sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
sysIP_DONTFRAG = C.IP_DONTFRAG
sysIP_BOUND_IF = C.IP_BOUND_IF
sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC
sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL
sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF
sysIP_REUSEADDR = C.IP_REUSEADDR
sysIP_DONTROUTE = C.IP_DONTROUTE
sysIP_BROADCAST = C.IP_BROADCAST
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
sizeofIPMreq = C.sizeof_struct_ip_mreq
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet C.struct_sockaddr_in
type inetPktinfo C.struct_in_pktinfo
type ipMreq C.struct_ip_mreq
type ipMreqSource C.struct_ip_mreq_source
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req

199
vendor/golang.org/x/net/ipv4/gen.go generated vendored Normal file

@ -0,0 +1,199 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
// This program generates system adaptation constants and types,
// internet protocol constants and tables by reading template files
// and IANA protocol registries.
package main
import (
"bytes"
"encoding/xml"
"fmt"
"go/format"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
)
func main() {
if err := genzsys(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := geniana(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func genzsys() error {
defs := "defs_" + runtime.GOOS + ".go"
f, err := os.Open(defs)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
f.Close()
cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
b, err := cmd.Output()
if err != nil {
return err
}
b, err = format.Source(b)
if err != nil {
return err
}
zsys := "zsys_" + runtime.GOOS + ".go"
switch runtime.GOOS {
case "freebsd", "linux":
zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
}
if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
return err
}
return nil
}
var registries = []struct {
url string
parse func(io.Writer, io.Reader) error
}{
{
"https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
parseICMPv4Parameters,
},
}
func geniana() error {
var bb bytes.Buffer
fmt.Fprintf(&bb, "// go generate gen.go\n")
fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
fmt.Fprintf(&bb, "package ipv4\n\n")
for _, r := range registries {
resp, err := http.Get(r.url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
}
if err := r.parse(&bb, resp.Body); err != nil {
return err
}
fmt.Fprintf(&bb, "\n")
}
b, err := format.Source(bb.Bytes())
if err != nil {
return err
}
if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
return err
}
return nil
}
func parseICMPv4Parameters(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var icp icmpv4Parameters
if err := dec.Decode(&icp); err != nil {
return err
}
prs := icp.escape()
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
fmt.Fprintf(w, "const (\n")
for _, pr := range prs {
if pr.Descr == "" {
continue
}
fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value)
fmt.Fprintf(w, "// %s\n", pr.OrigDescr)
}
fmt.Fprintf(w, ")\n\n")
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
for _, pr := range prs {
if pr.Descr == "" {
continue
}
fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr))
}
fmt.Fprintf(w, "}\n")
return nil
}
type icmpv4Parameters struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
Registries []struct {
Title string `xml:"title"`
Records []struct {
Value string `xml:"value"`
Descr string `xml:"description"`
} `xml:"record"`
} `xml:"registry"`
}
type canonICMPv4ParamRecord struct {
OrigDescr string
Descr string
Value int
}
func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {
id := -1
for i, r := range icp.Registries {
if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
id = i
break
}
}
if id < 0 {
return nil
}
prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))
sr := strings.NewReplacer(
"Messages", "",
"Message", "",
"ICMP", "",
"+", "P",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, pr := range icp.Registries[id].Records {
if strings.Contains(pr.Descr, "Reserved") ||
strings.Contains(pr.Descr, "Unassigned") ||
strings.Contains(pr.Descr, "Deprecated") ||
strings.Contains(pr.Descr, "Experiment") ||
strings.Contains(pr.Descr, "experiment") {
continue
}
ss := strings.Split(pr.Descr, "\n")
if len(ss) > 1 {
prs[i].Descr = strings.Join(ss, " ")
} else {
prs[i].Descr = ss[0]
}
s := strings.TrimSpace(prs[i].Descr)
prs[i].OrigDescr = s
prs[i].Descr = sr.Replace(s)
prs[i].Value, _ = strconv.Atoi(pr.Value)
}
return prs
}
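geniana above fetches the IANA ICMP parameter registry and renders it as Go source. A rough sketch of the iana.go it emits (the registry title and date vary with the download, and only the two well-known echo assignments are shown; ICMPType is declared elsewhere in the real package but is included here so the sketch stands alone):

// Sketch of geniana output; constants beyond the echo pair are omitted.
package ipv4

type ICMPType int // declared in icmp.go in the real package

// Internet Control Message Protocol (ICMP) Parameters, Updated: <date>
const (
	ICMPTypeEchoReply ICMPType = 0 // Echo Reply
	ICMPTypeEcho      ICMPType = 8 // Echo
)

var icmpTypes = map[ICMPType]string{
	0: "echo reply",
	8: "echo",
}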

82
vendor/golang.org/x/net/ipv6/defs_aix.go generated vendored Normal file

@ -0,0 +1,82 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type icmpv6Filter C.struct_icmp6_filter
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req

112
vendor/golang.org/x/net/ipv6/defs_darwin.go generated vendored Normal file

@ -0,0 +1,112 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#define __APPLE_USE_RFC_3542
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP
sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
sysIPV6_MSFILTER = C.IPV6_MSFILTER
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type icmpv6Filter C.struct_icmp6_filter
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req

84
vendor/golang.org/x/net/ipv6/defs_dragonfly.go generated vendored Normal file

@ -0,0 +1,84 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type icmpv6Filter C.struct_icmp6_filter

105
vendor/golang.org/x/net/ipv6/defs_freebsd.go generated vendored Normal file

@ -0,0 +1,105 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
sysIPV6_BINDANY = C.IPV6_BINDANY
sysIPV6_MSFILTER = C.IPV6_MSFILTER
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req
type icmpv6Filter C.struct_icmp6_filter

147
vendor/golang.org/x/net/ipv6/defs_linux.go generated vendored Normal file

@ -0,0 +1,147 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/filter.h>
#include <sys/socket.h>
*/
import "C"
const (
sysIPV6_ADDRFORM = C.IPV6_ADDRFORM
sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_FLOWINFO = C.IPV6_FLOWINFO
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP
sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysMCAST_MSFILTER = C.MCAST_MSFILTER
sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT
sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER
sysIPV6_MTU = C.IPV6_MTU
sysIPV6_RECVERR = C.IPV6_RECVERR
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST
sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST
//sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT
//sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT
//sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO
//sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE
//sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE
//sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT
sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR
sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES
sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT
sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT
sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR
sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR
sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT
sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF
sysICMPV6_FILTER = C.ICMPV6_FILTER
sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK
sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS
sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS
sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY
sysSOL_SOCKET = C.SOL_SOCKET
sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
sizeofSockFprog = C.sizeof_struct_sock_fprog
)
type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6FlowlabelReq C.struct_in6_flowlabel_req
type ipv6Mreq C.struct_ipv6_mreq
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req
type icmpv6Filter C.struct_icmp6_filter
type sockFProg C.struct_sock_fprog
type sockFilter C.struct_sock_filter

80
vendor/golang.org/x/net/ipv6/defs_netbsd.go generated vendored Normal file

@ -0,0 +1,80 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type icmpv6Filter C.struct_icmp6_filter

89
vendor/golang.org/x/net/ipv6/defs_openbsd.go generated vendored Normal file

@ -0,0 +1,89 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
sysICMP6_FILTER = C.ICMP6_FILTER
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL
sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL
sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL
sysIPSEC6_OUTSA = C.IPSEC6_OUTSA
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_PIPEX = C.IPV6_PIPEX
sysIPV6_RTABLE = C.IPV6_RTABLE
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type icmpv6Filter C.struct_icmp6_filter

114
vendor/golang.org/x/net/ipv6/defs_solaris.go generated vendored Normal file

@ -0,0 +1,114 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package ipv6
/*
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
*/
import "C"
const (
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
sysIPV6_PKTINFO = C.IPV6_PKTINFO
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
sysIPV6_RTHDR = C.IPV6_RTHDR
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
sysIPV6_SEC_OPT = C.IPV6_SEC_OPT
sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
sysIPV6_PATHMTU = C.IPV6_PATHMTU
sysIPV6_TCLASS = C.IPV6_TCLASS
sysIPV6_V6ONLY = C.IPV6_V6ONLY
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK
sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT
sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK
sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT
sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK
sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT
sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK
sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT
sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC
sysICMP6_FILTER = C.ICMP6_FILTER
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
sizeofGroupReq = C.sizeof_struct_group_req
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
type sockaddrStorage C.struct_sockaddr_storage
type sockaddrInet6 C.struct_sockaddr_in6
type inet6Pktinfo C.struct_in6_pktinfo
type ipv6Mtuinfo C.struct_ip6_mtuinfo
type ipv6Mreq C.struct_ipv6_mreq
type groupReq C.struct_group_req
type groupSourceReq C.struct_group_source_req
type icmpv6Filter C.struct_icmp6_filter

199
vendor/golang.org/x/net/ipv6/gen.go generated vendored Normal file

@ -0,0 +1,199 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
// This program generates system adaptation constants and types,
// internet protocol constants and tables by reading template files
// and IANA protocol registries.
package main
import (
"bytes"
"encoding/xml"
"fmt"
"go/format"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
)
func main() {
if err := genzsys(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := geniana(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func genzsys() error {
defs := "defs_" + runtime.GOOS + ".go"
f, err := os.Open(defs)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
f.Close()
cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
b, err := cmd.Output()
if err != nil {
return err
}
b, err = format.Source(b)
if err != nil {
return err
}
zsys := "zsys_" + runtime.GOOS + ".go"
switch runtime.GOOS {
case "freebsd", "linux":
zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
}
if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
return err
}
return nil
}
var registries = []struct {
url string
parse func(io.Writer, io.Reader) error
}{
{
"https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
parseICMPv6Parameters,
},
}
func geniana() error {
var bb bytes.Buffer
fmt.Fprintf(&bb, "// go generate gen.go\n")
fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
fmt.Fprintf(&bb, "package ipv6\n\n")
for _, r := range registries {
resp, err := http.Get(r.url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
}
if err := r.parse(&bb, resp.Body); err != nil {
return err
}
fmt.Fprintf(&bb, "\n")
}
b, err := format.Source(bb.Bytes())
if err != nil {
return err
}
if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
return err
}
return nil
}
func parseICMPv6Parameters(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var icp icmpv6Parameters
if err := dec.Decode(&icp); err != nil {
return err
}
prs := icp.escape()
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
fmt.Fprintf(w, "const (\n")
for _, pr := range prs {
if pr.Name == "" {
continue
}
fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value)
fmt.Fprintf(w, "// %s\n", pr.OrigName)
}
fmt.Fprintf(w, ")\n\n")
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
for _, pr := range prs {
if pr.Name == "" {
continue
}
fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName))
}
fmt.Fprintf(w, "}\n")
return nil
}
type icmpv6Parameters struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
Registries []struct {
Title string `xml:"title"`
Records []struct {
Value string `xml:"value"`
Name string `xml:"name"`
} `xml:"record"`
} `xml:"registry"`
}
type canonICMPv6ParamRecord struct {
OrigName string
Name string
Value int
}
func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord {
id := -1
for i, r := range icp.Registries {
if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
id = i
break
}
}
if id < 0 {
return nil
}
prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records))
sr := strings.NewReplacer(
"Messages", "",
"Message", "",
"ICMP", "",
"+", "P",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, pr := range icp.Registries[id].Records {
if strings.Contains(pr.Name, "Reserved") ||
strings.Contains(pr.Name, "Unassigned") ||
strings.Contains(pr.Name, "Deprecated") ||
strings.Contains(pr.Name, "Experiment") ||
strings.Contains(pr.Name, "experiment") {
continue
}
ss := strings.Split(pr.Name, "\n")
if len(ss) > 1 {
prs[i].Name = strings.Join(ss, " ")
} else {
prs[i].Name = ss[0]
}
s := strings.TrimSpace(prs[i].Name)
prs[i].OrigName = s
prs[i].Name = sr.Replace(s)
prs[i].Value, _ = strconv.Atoi(pr.Value)
}
return prs
}

78
vendor/golang.org/x/sys/unix/mkasm_darwin.go generated vendored Normal file

@ -0,0 +1,78 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
// This program must be run after mksyscall.go.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
)
func writeASMFile(in string, fileName string, buildTags string) {
trampolines := map[string]bool{}
var out bytes.Buffer
fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "// +build %s\n", buildTags)
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "#include \"textflag.h\"\n")
for _, line := range strings.Split(in, "\n") {
if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
continue
}
fn := line[5 : len(line)-13]
if !trampolines[fn] {
trampolines[fn] = true
fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
}
}
err := ioutil.WriteFile(fileName, out.Bytes(), 0644)
if err != nil {
log.Fatalf("can't write %s: %s", fileName, err)
}
}
func main() {
in1, err := ioutil.ReadFile("syscall_darwin.go")
if err != nil {
log.Fatalf("can't open syscall_darwin.go: %s", err)
}
arch := os.Args[1]
in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
if err != nil {
log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
}
in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
if err != nil {
log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
}
in := string(in1) + string(in2) + string(in3)
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12")
in1, err = ioutil.ReadFile("syscall_darwin.1_13.go")
if err != nil {
log.Fatalf("can't open syscall_darwin.1_13.go: %s", err)
}
in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch))
if err != nil {
log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err)
}
in = string(in1) + string(in2)
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13")
}
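The name extraction in writeASMFile depends on the fixed shape of the trampoline declarations it scans for. A small, self-contained sketch of that slicing (the function name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors writeASMFile: drop the leading "func " (5 bytes) and the trailing
	// "_trampoline()" (13 bytes) to recover the libc symbol name.
	line := "func libc_getpid_trampoline()" // hypothetical line from a zsyscall file
	if strings.HasPrefix(line, "func ") && strings.HasSuffix(line, "_trampoline()") {
		fn := line[5 : len(line)-13]
		fmt.Println(fn) // prints: libc_getpid
	}
}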

122
vendor/golang.org/x/sys/unix/mkpost.go generated vendored Normal file

@ -0,0 +1,122 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs; see README.md.
package main
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"regexp"
)
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the Docker-based build system if we should be.
if goos == "linux" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
os.Stderr.WriteString("See README.md\n")
os.Exit(1)
}
}
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
if goos == "aix" {
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
// to avoid having both StTimespec and Timespec.
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
}
// Intentionally export __val fields in Fsid and Sigset_t
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
// Intentionally export __fds_bits field in FdSet
fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
// If we have empty Ptrace structs, we should delete them. Only s390x emits
// nonempty Ptrace structs.
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
b = ptraceRexexp.ReplaceAll(b, nil)
// Replace the control_regs union with a blank identifier for now.
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
// Remove fields that are added by glibc
// Note that this is unstable as the identifiers are private.
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Convert [65]int8 to [65]byte in Utsname members to simplify
// conversion to string; see golang.org/issue/20753
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
// Convert [1024]int8 to [1024]byte in Ptmget members
convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
// Remove spare fields (e.g. in Statx_t)
spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove cgo padding fields
removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove padding, hidden, or unused fields
removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove the first line of warning from cgo
b = b[bytes.IndexByte(b, '\n')+1:]
// Modify the command in the header to include:
// mkpost, our own warning, and a build tag.
replacement := fmt.Sprintf(`$1 | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s,%s`, goarch, goos)
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// Rename Stat_t time fields
if goos == "freebsd" && goarch == "386" {
// Hide Stat_t.[AMCB]tim_ext fields
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
}
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
// gofmt
b, err = format.Source(b)
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(b)
}
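One concrete instance of the rewrites above, applying the same Utsname regex to a hand-written fragment (the field name and array size are illustrative, not captured cgo output):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The Utsname rewrite mkpost applies to cgo -godefs output: int8 arrays
	// become byte arrays so they convert to strings cleanly.
	convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
	in := []byte("Sysname [65]int8") // illustrative input fragment
	out := convertUtsnameRegex.ReplaceAll(in, []byte("$1$3[$4]byte"))
	fmt.Printf("%s\n", out) // prints: Sysname [65]byte
}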

402
vendor/golang.org/x/sys/unix/mksyscall.go generated vendored Normal file

@ -0,0 +1,402 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_darwin.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named errno.
A line beginning with //sysnb is like //sys, except that the
goroutine will not be suspended during the execution of the system
call. This must only be used for system calls which can never
block, as otherwise the system call could cause all goroutines to
hang.
*/
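// For illustration (a typical example of the form described above, not a line
// quoted from this file), a prototype looks like
//
//	//sys	Open(path string, mode int, perm uint32) (fd int, err error)
//
// and this program expands it into a Go wrapper that marshals the arguments and
// calls the appropriate Syscall/RawSyscall variant (or a libc trampoline on Darwin).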
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
plan9 = flag.Bool("plan9", false, "plan9")
openbsd = flag.Bool("openbsd", false, "openbsd")
netbsd = flag.Bool("netbsd", false, "netbsd")
dragonfly = flag.Bool("dragonfly", false, "dragonfly")
arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
tags = flag.String("tags", "", "build tags")
filename = flag.String("output", "", "output file name (standard output if omitted)")
)
// cmdLine returns this program's command-line arguments
func cmdLine() string {
return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is a function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
if goos == "" {
fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
os.Exit(1)
}
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the Docker-based build system if we should
if goos == "linux" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
fmt.Fprintf(os.Stderr, "See README.md\n")
os.Exit(1)
}
}
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
libc := false
if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) {
libc = true
}
trampolines := map[string]bool{}
text := ""
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, errno error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
// ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
if goos == "darwin" && !libc && funct == "ClockGettime" {
continue
}
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Go function header.
outDecl := ""
if len(out) > 0 {
outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
// Check if err return available
errvar := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
break
}
}
// Prepare arguments to Syscall.
var args []string
n := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass dummy pointer in that case.
// Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
n++
} else if p.Type == "int64" && (*openbsd || *netbsd) {
args = append(args, "0")
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if p.Type == "int64" && *dragonfly {
if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
if len(args)%2 == 1 && *arm {
// arm abi specifies 64-bit argument uses
// (even, odd) pair
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
}
// Determine which form to use; pad args with zeros.
asm := "Syscall"
if nonblock != nil {
if errvar == "" && goos == "linux" {
asm = "RawSyscallNoError"
} else {
asm = "RawSyscall"
}
} else {
if errvar == "" && goos == "linux" {
asm = "SyscallNoError"
}
}
if len(args) <= 3 {
for len(args) < 3 {
args = append(args, "0")
}
} else if len(args) <= 6 {
asm += "6"
for len(args) < 6 {
args = append(args, "0")
}
} else if len(args) <= 9 {
asm += "9"
for len(args) < 9 {
args = append(args, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
}
// System call number.
if sysname == "" {
sysname = "SYS_" + funct
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToUpper(sysname)
}
var libcFn string
if libc {
asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
sysname = strings.ToLower(sysname) // lowercase
libcFn = sysname
sysname = "funcPC(libc_" + sysname + "_trampoline)"
}
// Actual call.
arglist := strings.Join(args, ", ")
call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
// Assign return values.
body := ""
ret := []string{"_", "_", "_"}
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" && !*plan9 {
reg = "e1"
ret[2] = reg
doErrno = true
} else if p.Name == "err" && *plan9 {
ret[0] = "r0"
ret[2] = "e1"
break
} else {
reg = fmt.Sprintf("r%d", i)
ret[i] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%s != 0", reg)
}
if p.Type == "int64" && endianness != "" {
// 64-bit number in r1:r0 or r0:r1.
if i+2 > len(out) {
fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
}
if endianness == "big-endian" {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
} else {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
}
ret[i] = fmt.Sprintf("r%d", i)
ret[i+1] = fmt.Sprintf("r%d", i+1)
}
if reg != "e1" || *plan9 {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
text += fmt.Sprintf("\t%s\n", call)
} else {
if errvar == "" && goos == "linux" {
// raw syscall without error on Linux, see golang.org/issue/22924
text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
} else {
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
}
}
text += body
if *plan9 && ret[2] == "e1" {
text += "\tif int32(r0) == -1 {\n"
text += "\t\terr = e1\n"
text += "\t}\n"
} else if doErrno {
text += "\tif e1 != 0 {\n"
text += "\t\terr = errnoErr(e1)\n"
text += "\t}\n"
}
text += "\treturn\n"
text += "}\n\n"
if libc && !trampolines[libcFn] {
// some system calls share a trampoline, like read and readlen.
trampolines[libcFn] = true
// Declare assembly trampoline.
text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
// Assembly trampoline calls the libc_* function, which this magic
// redirects to use the function from libSystem.
text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
text += "\n"
}
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
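// For illustration, assuming a prototype line such as
//	//sys	Close(fd int) (err error)
// (Close and SYS_CLOSE are only an assumed example, not taken from a real
// input file), the loop above would emit a wrapper roughly like:
//
//	func Close(fd int) (err error) {
//		_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
//		if e1 != 0 {
//			err = errnoErr(e1)
//		}
//		return
//	}
//
// which is then substituted into srcTemplate below together with the command
// line and build tags.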
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
%s
`

415
vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go generated vendored Normal file
View File

@ -0,0 +1,415 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_aix.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to differ from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
*/
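// For illustration, assuming a prototype line such as
//	//sys	Unlink(path string) (err error)
// (Unlink is only an assumed example), this generator would add
//	int unlink(uintptr_t);
// to the cgo preamble and emit a wrapper roughly like:
//
//	func Unlink(path string) (err error) {
//		_p0 := uintptr(unsafe.Pointer(C.CString(path)))
//		r0, er := C.unlink(C.uintptr_t(_p0))
//		if (r0 == -1 && er != nil) {
//			err = er
//		}
//		return
//	}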
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
aix = flag.Bool("aix", false, "aix")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is a function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
text := ""
cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Check if value return, err return available
errvar := ""
retvar := ""
rettype := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
} else {
retvar = p.Name
rettype = p.Type
}
}
// System call name.
if sysname == "" {
sysname = funct
}
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
cRettype := ""
if rettype == "unsafe.Pointer" {
cRettype = "uintptr_t"
} else if rettype == "uintptr" {
cRettype = "uintptr_t"
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
cRettype = "uintptr_t"
} else if rettype == "int" {
cRettype = "int"
} else if rettype == "int32" {
cRettype = "int"
} else if rettype == "int64" {
cRettype = "long long"
} else if rettype == "uint32" {
cRettype = "unsigned int"
} else if rettype == "uint64" {
cRettype = "unsigned long long"
} else {
cRettype = "int"
}
if sysname == "exit" {
cRettype = "void"
}
// Change p.Types to c
var cIn []string
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "string" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t", "size_t")
} else if p.Type == "unsafe.Pointer" {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "uintptr" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "int" {
cIn = append(cIn, "int")
} else if p.Type == "int32" {
cIn = append(cIn, "int")
} else if p.Type == "int64" {
cIn = append(cIn, "long long")
} else if p.Type == "uint32" {
cIn = append(cIn, "unsigned int")
} else if p.Type == "uint64" {
cIn = append(cIn, "unsigned long long")
} else {
cIn = append(cIn, "int")
}
}
if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
// Imports of system calls from libc
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
cExtern += fmt.Sprintf("(%s);\n", cIn)
}
// Shared object file name.
if *aix {
if modname == "" {
modname = "libc.a/shr_64.o"
} else {
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
os.Exit(1)
}
}
strconvfunc := "C.CString"
// Go function header.
if outps != "" {
outps = fmt.Sprintf(" (%s)", outps)
}
if text != "" {
text += "\n"
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
// Prepare arguments to Syscall.
var args []string
n := 0
argN := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
n++
text += fmt.Sprintf("\tvar _p%d int\n", n)
text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
n++
} else if p.Type == "int64" && endianness != "" {
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
n++
} else if p.Type == "bool" {
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
args = append(args, fmt.Sprintf("_p%d", n))
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
} else if p.Type == "unsafe.Pointer" {
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
} else if p.Type == "int" {
if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
} else if argN == 0 && funct == "fcntl" {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
}
} else if p.Type == "int32" {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
} else if p.Type == "int64" {
args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
} else if p.Type == "uint32" {
args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
} else if p.Type == "uint64" {
args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
} else if p.Type == "uintptr" {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
}
argN++
}
// Actual call.
arglist := strings.Join(args, ", ")
call := ""
if sysname == "exit" {
if errvar != "" {
call += "er :="
} else {
call += ""
}
} else if errvar != "" {
call += "r0,er :="
} else if retvar != "" {
call += "r0,_ :="
} else {
call += ""
}
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
} else {
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
}
// Assign return values.
body := ""
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
} else {
reg = "r0"
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
// verify return
if sysname != "exit" && errvar != "" {
if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
} else {
body += "\tif (r0 ==-1 && er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
}
} else if errvar != "" {
body += "\tif (er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
}
text += fmt.Sprintf("\t%s\n", call)
text += body
text += "\treturn\n"
text += "}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
%s
*/
import "C"
import (
"unsafe"
)
%s
%s
`

614
vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go generated vendored Normal file
View File

@ -0,0 +1,614 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_aix.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to differ from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
This program will generate three files and handle both gc and gccgo implementation:
- zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
- zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6
- zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
The generated code looks like this
zsyscall_aix_ppc64.go
func asyscall(...) (n int, err error) {
// Pointer Creation
r1, e1 := callasyscall(...)
// Type Conversion
// Error Handler
return
}
zsyscall_aix_ppc64_gc.go
//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
//go:linkname libc_asyscall libc_asyscall
var asyscall syscallFunc
func callasyscall(...) (r1 uintptr, e1 Errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
return
}
zsyscall_aix_ppc64_ggcgo.go
// int asyscall(...)
import "C"
func callasyscall(...) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.asyscall(...))
e1 = syscall.GetErrno()
return
}
*/
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
aix = flag.Bool("aix", false, "aix")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is a function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
// GCCGO
textgccgo := ""
cExtern := "/*\n#include <stdint.h>\n"
// GC
textgc := ""
dynimports := ""
linknames := ""
var vars []string
// COMMON
textcommon := ""
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
if sysname == "" {
sysname = funct
}
onlyCommon := false
if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
// This function calls another syscall which is already implemented.
// Therefore, the gc and gccgo part must not be generated.
onlyCommon = true
}
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
if !onlyCommon {
textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
}
// Check if value return, err return available
errvar := ""
rettype := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
} else {
rettype = p.Type
}
}
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
// GCCGO Prototype return type
cRettype := ""
if rettype == "unsafe.Pointer" {
cRettype = "uintptr_t"
} else if rettype == "uintptr" {
cRettype = "uintptr_t"
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
cRettype = "uintptr_t"
} else if rettype == "int" {
cRettype = "int"
} else if rettype == "int32" {
cRettype = "int"
} else if rettype == "int64" {
cRettype = "long long"
} else if rettype == "uint32" {
cRettype = "unsigned int"
} else if rettype == "uint64" {
cRettype = "unsigned long long"
} else {
cRettype = "int"
}
if sysname == "exit" {
cRettype = "void"
}
// GCCGO Prototype arguments type
var cIn []string
for i, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "string" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t", "size_t")
} else if p.Type == "unsafe.Pointer" {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "uintptr" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "int" {
if (i == 0 || i == 2) && funct == "fcntl" {
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
cIn = append(cIn, "uintptr_t")
} else {
cIn = append(cIn, "int")
}
} else if p.Type == "int32" {
cIn = append(cIn, "int")
} else if p.Type == "int64" {
cIn = append(cIn, "long long")
} else if p.Type == "uint32" {
cIn = append(cIn, "unsigned int")
} else if p.Type == "uint64" {
cIn = append(cIn, "unsigned long long")
} else {
cIn = append(cIn, "int")
}
}
if !onlyCommon {
// GCCGO Prototype Generation
// Imports of system calls from libc
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
cExtern += fmt.Sprintf("(%s);\n", cIn)
}
// GC Library name
if modname == "" {
modname = "libc.a/shr_64.o"
} else {
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
os.Exit(1)
}
sysvarname := fmt.Sprintf("libc_%s", sysname)
if !onlyCommon {
// GC Runtime import of function to allow cross-platform builds.
dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
// GC Link symbol to proc address variable.
linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
// GC Library proc address variable.
vars = append(vars, sysvarname)
}
strconvfunc := "BytePtrFromString"
strconvtype := "*byte"
// Go function header.
if outps != "" {
outps = fmt.Sprintf(" (%s)", outps)
}
if textcommon != "" {
textcommon += "\n"
}
textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
// Prepare arguments to call.
var argscommon []string // Arguments in the common part
var argscall []string // Arguments for call prototype
var argsgc []string // Arguments for gc call (with syscall6)
var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
n := 0
argN := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if p.Type == "string" && errvar != "" {
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
n++
} else if p.Type == "int64" && endianness != "" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
} else if p.Type == "bool" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if p.Type == "int" {
if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
}
} else if p.Type == "int32" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
} else if p.Type == "int64" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
} else if p.Type == "uint32" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
} else if p.Type == "uint64" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
} else if p.Type == "uintptr" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
}
argN++
}
nargs := len(argsgc)
// COMMON function generation
argscommonlist := strings.Join(argscommon, ", ")
callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
ret := []string{"_", "_"}
body := ""
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
ret[1] = reg
doErrno = true
} else {
reg = "r0"
ret[0] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%s != 0", reg)
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" {
textcommon += fmt.Sprintf("\t%s\n", callcommon)
} else {
textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
}
textcommon += body
if doErrno {
textcommon += "\tif e1 != 0 {\n"
textcommon += "\t\terr = errnoErr(e1)\n"
textcommon += "\t}\n"
}
textcommon += "\treturn\n"
textcommon += "}\n"
if onlyCommon {
continue
}
// CALL Prototype
callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
// GC function generation
asm := "syscall6"
if nonblock != nil {
asm = "rawSyscall6"
}
if len(argsgc) <= 6 {
for len(argsgc) < 6 {
argsgc = append(argsgc, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
os.Exit(1)
}
argsgclist := strings.Join(argsgc, ", ")
callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
textgc += callProto
textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
textgc += "\treturn\n}\n"
// GCCGO function generation
argsgccgolist := strings.Join(argsgccgo, ", ")
var callgccgo string
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
} else {
callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
}
textgccgo += callProto
textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
textgccgo += "\te1 = syscall.GetErrno()\n"
textgccgo += "\treturn\n}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
// Print zsyscall_aix_ppc64.go
err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
[]byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
// Print zsyscall_aix_ppc64_gc.go
vardecls := "\t" + strings.Join(vars, ",\n\t")
vardecls += " syscallFunc"
err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
[]byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
// Print zsyscall_aix_ppc64_gccgo.go
err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
[]byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
}
const srcTemplate1 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
import (
"unsafe"
)
%s
%s
`
const srcTemplate2 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
// +build !gccgo
package %s
import (
"unsafe"
)
%s
%s
%s
type syscallFunc uintptr
var (
%s
)
// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
%s
`
const srcTemplate3 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
// +build gccgo
package %s
%s
*/
import "C"
import (
"syscall"
)
%s
%s
`

335
vendor/golang.org/x/sys/unix/mksyscall_solaris.go generated vendored Normal file
View File

@ -0,0 +1,335 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_solaris.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to differ from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
*/
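// For illustration, assuming a prototype line such as
//	//sys	Close(fd int) (err error)
// (Close is only an assumed example), this generator would emit roughly:
//
//	//go:cgo_import_dynamic libc_close close "libc.so"
//	//go:linkname procClose libc_close
//
//	var procClose syscallFunc
//
//	func Close(fd int) (err error) {
//		_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0)
//		if e1 != 0 {
//			err = e1
//		}
//		return
//	}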
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is a function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
text := ""
dynimports := ""
linknames := ""
var vars []string
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Shared object file name.
if modname == "" {
modname = "libc"
}
// System call name.
if sysname == "" {
sysname = funct
}
// System call pointer variable name.
sysvarname := fmt.Sprintf("proc%s", sysname)
strconvfunc := "BytePtrFromString"
strconvtype := "*byte"
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
// Runtime import of function to allow cross-platform builds.
dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
// Link symbol to proc address variable.
linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
// Library proc address variable.
vars = append(vars, sysvarname)
// Go function header.
outlist := strings.Join(out, ", ")
if outlist != "" {
outlist = fmt.Sprintf(" (%s)", outlist)
}
if text != "" {
text += "\n"
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
// Check if err return available
errvar := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
continue
}
}
// Prepare arguments to Syscall.
var args []string
n := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
n++
} else if p.Type == "int64" && endianness != "" {
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
} else if p.Type == "bool" {
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
n++
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
}
nargs := len(args)
// Determine which form to use; pad args with zeros.
asm := "sysvicall6"
if nonblock != nil {
asm = "rawSysvicall6"
}
if len(args) <= 6 {
for len(args) < 6 {
args = append(args, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
os.Exit(1)
}
// Actual call.
arglist := strings.Join(args, ", ")
call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
// Assign return values.
body := ""
ret := []string{"_", "_", "_"}
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
ret[2] = reg
doErrno = true
} else {
reg = fmt.Sprintf("r%d", i)
ret[i] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%d != 0", reg)
}
if p.Type == "int64" && endianness != "" {
// 64-bit number in r1:r0 or r0:r1.
if i+2 > len(out) {
fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
os.Exit(1)
}
if endianness == "big-endian" {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
} else {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
}
ret[i] = fmt.Sprintf("r%d", i)
ret[i+1] = fmt.Sprintf("r%d", i+1)
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
text += fmt.Sprintf("\t%s\n", call)
} else {
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
}
text += body
if doErrno {
text += "\tif e1 != 0 {\n"
text += "\t\terr = e1\n"
text += "\t}\n"
}
text += "\treturn\n"
text += "}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
vardecls := "\t" + strings.Join(vars, ",\n\t")
vardecls += " syscallFunc"
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
import (
"syscall"
"unsafe"
)
%s
%s
%s
var (
%s
)
%s
`

355
vendor/golang.org/x/sys/unix/mksysctl_openbsd.go generated vendored Normal file
View File

@ -0,0 +1,355 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
//
// Build a MIB with each entry being an array containing the level, type and
// a hash that will contain additional entries if the current entry is a node.
// We then walk this MIB and create a flattened sysctl name to OID hash.
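// For illustration, assuming the usual OpenBSD values CTL_KERN = 1 and
// KERN_OSTYPE = 1, the flattened table printed at the end contains an entry
// roughly like:
//
//	{ "kern.ostype", []_C_int{ 1, 1 } },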
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
)
var (
goos, goarch string
)
// cmdLine returns this program's commandline arguments.
func cmdLine() string {
return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags.
func buildTags() string {
return fmt.Sprintf("%s,%s", goarch, goos)
}
// reMatch performs regular expression match and stores the substring slice to value pointed by m.
func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
*m = re.FindStringSubmatch(str)
if *m != nil {
return true
}
return false
}
type nodeElement struct {
n int
t string
pE *map[string]nodeElement
}
var (
debugEnabled bool
mib map[string]nodeElement
node *map[string]nodeElement
nodeMap map[string]string
sysCtl []string
)
var (
ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
netInetRE = regexp.MustCompile(`^netinet/`)
netInet6RE = regexp.MustCompile(`^netinet6/`)
netRE = regexp.MustCompile(`^net/`)
bracesRE = regexp.MustCompile(`{.*}`)
ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
)
func debug(s string) {
if debugEnabled {
fmt.Fprintln(os.Stderr, s)
}
}
// Walk the MIB and build a sysctl name to OID mapping.
func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
lNode := pNode // local copy of pointer to node
var keys []string
for k := range *lNode {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
nodename := name
if name != "" {
nodename += "."
}
nodename += key
nodeoid := append(oid, (*pNode)[key].n)
if (*pNode)[key].t == `CTLTYPE_NODE` {
if _, ok := nodeMap[nodename]; ok {
lNode = &mib
ctlName := nodeMap[nodename]
for _, part := range strings.Split(ctlName, ".") {
lNode = ((*lNode)[part]).pE
}
} else {
lNode = (*pNode)[key].pE
}
buildSysctl(lNode, nodename, nodeoid)
} else if (*pNode)[key].t != "" {
oidStr := []string{}
for j := range nodeoid {
oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
}
text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
sysCtl = append(sysCtl, text)
}
}
}
func main() {
// Get the OS (using GOOS_TARGET if it exists)
goos = os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
// Get the architecture (using GOARCH_TARGET if it exists)
goarch = os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check if GOOS and GOARCH environment variables are defined
if goarch == "" || goos == "" {
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
os.Exit(1)
}
mib = make(map[string]nodeElement)
headers := [...]string{
`sys/sysctl.h`,
`sys/socket.h`,
`sys/tty.h`,
`sys/malloc.h`,
`sys/mount.h`,
`sys/namei.h`,
`sys/sem.h`,
`sys/shm.h`,
`sys/vmmeter.h`,
`uvm/uvmexp.h`,
`uvm/uvm_param.h`,
`uvm/uvm_swap_encrypt.h`,
`ddb/db_var.h`,
`net/if.h`,
`net/if_pfsync.h`,
`net/pipex.h`,
`netinet/in.h`,
`netinet/icmp_var.h`,
`netinet/igmp_var.h`,
`netinet/ip_ah.h`,
`netinet/ip_carp.h`,
`netinet/ip_divert.h`,
`netinet/ip_esp.h`,
`netinet/ip_ether.h`,
`netinet/ip_gre.h`,
`netinet/ip_ipcomp.h`,
`netinet/ip_ipip.h`,
`netinet/pim_var.h`,
`netinet/tcp_var.h`,
`netinet/udp_var.h`,
`netinet6/in6.h`,
`netinet6/ip6_divert.h`,
`netinet6/pim6_var.h`,
`netinet/icmp6.h`,
`netmpls/mpls.h`,
}
ctls := [...]string{
`kern`,
`vm`,
`fs`,
`net`,
//debug /* Special handling required */
`hw`,
//machdep /* Arch specific */
`user`,
`ddb`,
//vfs /* Special handling required */
`fs.posix`,
`kern.forkstat`,
`kern.intrcnt`,
`kern.malloc`,
`kern.nchstats`,
`kern.seminfo`,
`kern.shminfo`,
`kern.timecounter`,
`kern.tty`,
`kern.watchdog`,
`net.bpf`,
`net.ifq`,
`net.inet`,
`net.inet.ah`,
`net.inet.carp`,
`net.inet.divert`,
`net.inet.esp`,
`net.inet.etherip`,
`net.inet.gre`,
`net.inet.icmp`,
`net.inet.igmp`,
`net.inet.ip`,
`net.inet.ip.ifq`,
`net.inet.ipcomp`,
`net.inet.ipip`,
`net.inet.mobileip`,
`net.inet.pfsync`,
`net.inet.pim`,
`net.inet.tcp`,
`net.inet.udp`,
`net.inet6`,
`net.inet6.divert`,
`net.inet6.ip6`,
`net.inet6.icmp6`,
`net.inet6.pim6`,
`net.inet6.tcp6`,
`net.inet6.udp6`,
`net.mpls`,
`net.mpls.ifq`,
`net.key`,
`net.pflow`,
`net.pfsync`,
`net.pipex`,
`net.rt`,
`vm.swapencrypt`,
//vfsgenctl /* Special handling required */
}
// Node name "fixups"
ctlMap := map[string]string{
"ipproto": "net.inet",
"net.inet.ipproto": "net.inet",
"net.inet6.ipv6proto": "net.inet6",
"net.inet6.ipv6": "net.inet6.ip6",
"net.inet.icmpv6": "net.inet6.icmp6",
"net.inet6.divert6": "net.inet6.divert",
"net.inet6.tcp6": "net.inet.tcp",
"net.inet6.udp6": "net.inet.udp",
"mpls": "net.mpls",
"swpenc": "vm.swapencrypt",
}
// Node mappings
nodeMap = map[string]string{
"net.inet.ip.ifq": "net.ifq",
"net.inet.pfsync": "net.pfsync",
"net.mpls.ifq": "net.ifq",
}
mCtls := make(map[string]bool)
for _, ctl := range ctls {
mCtls[ctl] = true
}
for _, header := range headers {
debug("Processing " + header)
file, err := os.Open(filepath.Join("/usr/include", header))
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
var sub []string
if reMatch(ctlNames1RE, s.Text(), &sub) ||
reMatch(ctlNames2RE, s.Text(), &sub) ||
reMatch(ctlNames3RE, s.Text(), &sub) {
if sub[1] == `CTL_NAMES` {
// Top level.
node = &mib
} else {
// Node.
nodename := strings.ToLower(sub[2])
ctlName := ""
if reMatch(netInetRE, header, &sub) {
ctlName = "net.inet." + nodename
} else if reMatch(netInet6RE, header, &sub) {
ctlName = "net.inet6." + nodename
} else if reMatch(netRE, header, &sub) {
ctlName = "net." + nodename
} else {
ctlName = nodename
ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
}
if val, ok := ctlMap[ctlName]; ok {
ctlName = val
}
if _, ok := mCtls[ctlName]; !ok {
debug("Ignoring " + ctlName + "...")
continue
}
// Walk down from the top of the MIB.
node = &mib
for _, part := range strings.Split(ctlName, ".") {
if _, ok := (*node)[part]; !ok {
debug("Missing node " + part)
(*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
}
node = (*node)[part].pE
}
}
// Populate current node with entries.
i := -1
for !strings.HasPrefix(s.Text(), "}") {
s.Scan()
if reMatch(bracesRE, s.Text(), &sub) {
i++
}
if !reMatch(ctlTypeRE, s.Text(), &sub) {
continue
}
(*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
}
}
}
err = s.Err()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
file.Close()
}
buildSysctl(&mib, "", []int{})
sort.Strings(sysCtl)
text := strings.Join(sysCtl, "")
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
const srcTemplate = `// %s
// Code generated by the command above; DO NOT EDIT.
// +build %s
package unix
type mibentry struct {
ctlname string
ctloid []_C_int
}
var sysctlMib = []mibentry {
%s
}
`

190
vendor/golang.org/x/sys/unix/mksysnum.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Generate system call table for DragonFly, NetBSD,
// FreeBSD, OpenBSD or Darwin from master list
// (for example, /usr/src/sys/kern/syscalls.master or
// sys/syscall.h).
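// For illustration, an OpenBSD syscalls.master entry such as
//	1	STD		{ void	sys_exit(int rval); }
// (shown here only as an assumed example) is turned into a constant
// roughly like:
//	SYS_EXIT = 1; // { void	sys_exit(int rval); }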
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
var (
goos, goarch string
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return fmt.Sprintf("%s,%s", goarch, goos)
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
// source string and substring slice for regexp
type re struct {
str string // source string
sub []string // matched sub-string
}
// Match performs regular expression match
func (r *re) Match(exp string) bool {
r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
if r.sub != nil {
return true
}
return false
}
// fetchFile fetches a text file from URL
func fetchFile(URL string) io.Reader {
resp, err := http.Get(URL)
checkErr(err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
checkErr(err)
return strings.NewReader(string(body))
}
// readFile reads a text file from path
func readFile(path string) io.Reader {
file, err := os.Open(path)
checkErr(err)
return file
}
func format(name, num, proto string) string {
name = strings.ToUpper(name)
// There are multiple entries for enosys and nosys, so comment them out.
nm := re{str: name}
if nm.Match(`^SYS_E?NOSYS$`) {
name = fmt.Sprintf("// %s", name)
}
if name == `SYS_SYS_EXIT` {
name = `SYS_EXIT`
}
return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
}
func main() {
// Get the OS (using GOOS_TARGET if it exists)
goos = os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
// Get the architecture (using GOARCH_TARGET if it exists)
goarch = os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check if GOOS and GOARCH environment variables are defined
if goarch == "" || goos == "" {
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
os.Exit(1)
}
file := strings.TrimSpace(os.Args[1])
var syscalls io.Reader
if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
// Download syscalls.master file
syscalls = fetchFile(file)
} else {
syscalls = readFile(file)
}
var text, line string
s := bufio.NewScanner(syscalls)
for s.Scan() {
t := re{str: line}
if t.Match(`^(.*)\\$`) {
// Handle continuation
line = t.sub[1]
line += strings.TrimLeft(s.Text(), " \t")
} else {
// New line
line = s.Text()
}
t = re{str: line}
if t.Match(`\\$`) {
continue
}
t = re{str: line}
switch goos {
case "dragonfly":
if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
num, proto := t.sub[1], t.sub[2]
name := fmt.Sprintf("SYS_%s", t.sub[3])
text += format(name, num, proto)
}
case "freebsd":
if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
num, proto := t.sub[1], t.sub[2]
name := fmt.Sprintf("SYS_%s", t.sub[3])
text += format(name, num, proto)
}
case "openbsd":
if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
num, proto, name := t.sub[1], t.sub[3], t.sub[4]
text += format(name, num, proto)
}
case "netbsd":
if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
name := t.sub[7] + "_" + t.sub[9]
if t.sub[11] != "" {
name = t.sub[7] + "_" + t.sub[11]
}
name = strings.ToUpper(name)
if compat == "" || compat == "13" || compat == "30" || compat == "50" {
text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
}
}
case "darwin":
if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
name, num := t.sub[1], t.sub[2]
name = strings.ToUpper(name)
text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
}
default:
fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
os.Exit(1)
}
}
err := s.Err()
checkErr(err)
fmt.Printf(template, cmdLine(), buildTags(), text)
}
const template = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package unix
const(
%s)`

237
vendor/golang.org/x/sys/unix/types_aix.go generated vendored Normal file
View File

@ -0,0 +1,237 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +build aix
/*
Input to cgo -godefs. See also mkerrors.sh and mkall.sh
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
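// When run through cgo -godefs, each "type X C.struct_y" below is replaced by
// a plain Go definition with the same layout; for example, "type Timespec
// C.struct_timespec" comes out roughly as (field sizes assumed for the 64-bit
// build):
//
//	type Timespec struct {
//		Sec  int64
//		Nsec int64
//	}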
package unix
/*
#include <sys/types.h>
#include <sys/time.h>
#include <sys/limits.h>
#include <sys/un.h>
#include <utime.h>
#include <sys/utsname.h>
#include <sys/poll.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/termio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <dirent.h>
#include <fcntl.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
type off64 C.off64_t
type off C.off_t
type Mode_t C.mode_t
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
type Timex C.struct_timex
type Time_t C.time_t
type Tms C.struct_tms
type Utimbuf C.struct_utimbuf
type Timezone C.struct_timezone
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit64
type Pid_t C.pid_t
type _Gid_t C.gid_t
type dev_t C.dev_t
// Files
type Stat_t C.struct_stat
type StatxTimestamp C.struct_statx_timestamp
type Statx_t C.struct_statx
type Dirent C.struct_dirent
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Cmsghdr C.struct_cmsghdr
type ICMPv6Filter C.struct_icmp6_filter
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type IPv6MTUInfo C.struct_ip6_mtuinfo
type Linger C.struct_linger
type Msghdr C.struct_msghdr
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
)
type IfMsgHdr C.struct_if_msghdr
// Misc
type FdSet C.fd_set
type Utsname C.struct_utsname
type Ustat_t C.struct_ustat
type Sigset_t C.sigset_t
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// Terminal handling
type Termios C.struct_termios
type Termio C.struct_termio
type Winsize C.struct_winsize
// poll
type PollFd struct {
Fd int32
Events uint16
Revents uint16
}
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// flock_t
type Flock_t C.struct_flock64
// Statfs
type Fsid_t C.struct_fsid_t
type Fsid64_t C.struct_fsid64_t
type Statfs_t C.struct_statfs
const RNDGETENTCNT = 0x80045200
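// Not part of the upstream file: a sketch of what `go tool cgo -godefs`
// produces from a declaration such as `type Timespec C.struct_timespec`
// above. The generated ztypes_* file replaces the C reference with a concrete
// Go struct; field names and widths depend on GOOS/GOARCH, so the shape below
// is illustrative only:
//
//	type Timespec struct {
//		Sec  int64
//		Nsec int64
//	}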

283
vendor/golang.org/x/sys/unix/types_darwin.go generated vendored Normal file
View File

@ -0,0 +1,283 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define __DARWIN_UNIX03 0
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/message.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat64
type Statfs_t C.struct_statfs64
type Flock_t C.struct_flock
type Fstore_t C.struct_fstore
type Radvisory_t C.struct_radvisory
type Fbootstraptransfer_t C.struct_fbootstraptransfer
type Log2phys_t C.struct_log2phys
type Fsid C.struct_fsid
type Dirent C.struct_dirent
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet4Pktinfo C.struct_in_pktinfo
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfmaMsghdr2 C.struct_ifma_msghdr2
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

263
vendor/golang.org/x/sys/unix/types_dragonfly.go generated vendored Normal file
View File

@ -0,0 +1,263 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.struct_fsid
// File system limits
const (
PathMax = C.PATH_MAX
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Uname
type Utsname C.struct_utsname

400
vendor/golang.org/x/sys/unix/types_freebsd.go generated vendored Normal file
View File

@ -0,0 +1,400 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define _WANT_FREEBSD11_STAT 1
#define _WANT_FREEBSD11_STATFS 1
#define _WANT_FREEBSD11_DIRENT 1
#define _WANT_FREEBSD11_KEVENT 1
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/capsicum.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
// See /usr/include/net/if.h.
struct if_data8 {
u_char ifi_type;
u_char ifi_physical;
u_char ifi_addrlen;
u_char ifi_hdrlen;
u_char ifi_link_state;
u_char ifi_spare_char1;
u_char ifi_spare_char2;
u_char ifi_datalen;
u_long ifi_mtu;
u_long ifi_metric;
u_long ifi_baudrate;
u_long ifi_ipackets;
u_long ifi_ierrors;
u_long ifi_opackets;
u_long ifi_oerrors;
u_long ifi_collisions;
u_long ifi_ibytes;
u_long ifi_obytes;
u_long ifi_imcasts;
u_long ifi_omcasts;
u_long ifi_iqdrops;
u_long ifi_noproto;
u_long ifi_hwassist;
// FIXME: these are now unions, so maybe need to change definitions?
#undef ifi_epoch
time_t ifi_epoch;
#undef ifi_lastchange
struct timeval ifi_lastchange;
};
// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
// See /usr/include/net/if.h.
struct if_msghdr8 {
u_short ifm_msglen;
u_char ifm_version;
u_char ifm_type;
int ifm_addrs;
int ifm_flags;
u_short ifm_index;
struct if_data8 ifm_data;
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
const (
_statfsVersion = C.STATFS_VERSION
_dirblksiz = C.DIRBLKSIZ
)
type Stat_t C.struct_stat
type stat_freebsd11_t C.struct_freebsd11_stat
type Statfs_t C.struct_statfs
type statfs_freebsd11_t C.struct_freebsd11_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type dirent_freebsd11 C.struct_freebsd11_dirent
type Fsid C.struct_fsid
// File system limits
const (
PathMax = C.PATH_MAX
)
// Advice to Fadvise
const (
FADV_NORMAL = C.POSIX_FADV_NORMAL
FADV_RANDOM = C.POSIX_FADV_RANDOM
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPMreqn C.struct_ip_mreqn
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPMreqn = C.sizeof_struct_ip_mreqn
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_ATTACH = C.PT_ATTACH
PTRACE_CONT = C.PT_CONTINUE
PTRACE_DETACH = C.PT_DETACH
PTRACE_GETFPREGS = C.PT_GETFPREGS
PTRACE_GETFSBASE = C.PT_GETFSBASE
PTRACE_GETLWPLIST = C.PT_GETLWPLIST
PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
PTRACE_GETREGS = C.PT_GETREGS
PTRACE_GETXSTATE = C.PT_GETXSTATE
PTRACE_IO = C.PT_IO
PTRACE_KILL = C.PT_KILL
PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
PTRACE_LWPINFO = C.PT_LWPINFO
PTRACE_SETFPREGS = C.PT_SETFPREGS
PTRACE_SETREGS = C.PT_SETREGS
PTRACE_SINGLESTEP = C.PT_STEP
PTRACE_TRACEME = C.PT_TRACE_ME
)
const (
PIOD_READ_D = C.PIOD_READ_D
PIOD_WRITE_D = C.PIOD_WRITE_D
PIOD_READ_I = C.PIOD_READ_I
PIOD_WRITE_I = C.PIOD_WRITE_I
)
const (
PL_FLAG_BORN = C.PL_FLAG_BORN
PL_FLAG_EXITED = C.PL_FLAG_EXITED
PL_FLAG_SI = C.PL_FLAG_SI
)
const (
TRAP_BRKPT = C.TRAP_BRKPT
TRAP_TRACE = C.TRAP_TRACE
)
type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
type __Siginfo C.struct___siginfo
type Sigset_t C.sigset_t
type Reg C.struct_reg
type FpReg C.struct_fpreg
type PtraceIoDesc C.struct_ptrace_io_desc
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent_freebsd11
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
sizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
sizeofIfData = C.sizeof_struct_if_data
SizeofIfData = C.sizeof_struct_if_data8
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type ifMsghdr C.struct_if_msghdr
type IfMsghdr C.struct_if_msghdr8
type ifData C.struct_if_data
type IfData C.struct_if_data8
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfZbuf C.struct_bpf_zbuf
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfZbufHeader C.struct_bpf_zbuf_header
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLINIGNEOF = C.POLLINIGNEOF
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Capabilities
type CapRights C.struct_cap_rights
// Uname
type Utsname C.struct_utsname

290
vendor/golang.org/x/sys/unix/types_netbsd.go generated vendored Normal file
View File

@ -0,0 +1,290 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.fsid_t
// File system limits
const (
PathMax = C.PATH_MAX
)
// Advice to Fadvise
const (
FADV_NORMAL = C.POSIX_FADV_NORMAL
FADV_RANDOM = C.POSIX_FADV_RANDOM
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
type Mclpool C.struct_mclpool
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfTimeval C.struct_bpf_timeval
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
type Ptmget C.struct_ptmget
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Sysctl
type Sysctlnode C.struct_sysctlnode
// Uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

283
vendor/golang.org/x/sys/unix/types_openbsd.go generated vendored Normal file
View File

@ -0,0 +1,283 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <uvm/uvmexp.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.fsid_t
// File system limits
const (
PathMax = C.PATH_MAX
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
type Mclpool C.struct_mclpool
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfTimeval C.struct_bpf_timeval
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Signal Sets
type Sigset_t C.sigset_t
// Uname
type Utsname C.struct_utsname
// Uvmexp
const SizeofUvmexp = C.sizeof_struct_uvmexp
type Uvmexp C.struct_uvmexp
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

266
vendor/golang.org/x/sys/unix/types_solaris.go generated vendored Normal file
View File

@ -0,0 +1,266 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
// These defines ensure that builds done on newer versions of Solaris are
// backwards-compatible with older versions of Solaris and
// OpenSolaris-based derivatives.
#define __USE_SUNOS_SOCKETS__ // msghdr
#define __USE_LEGACY_PROTOTYPES__ // iovec
#include <dirent.h>
#include <fcntl.h>
#include <netdb.h>
#include <limits.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <termio.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
#include <ustat.h>
#include <utime.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
MaxHostNameLen = C.MAXHOSTNAMELEN
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
type Tms C.struct_tms
type Utimbuf C.struct_utimbuf
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Flock_t C.struct_flock
type Dirent C.struct_dirent
// Filesystems
type _Fsblkcnt_t C.fsblkcnt_t
type Statvfs_t C.struct_statvfs
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Select
type FdSet C.fd_set
// Misc
type Utsname C.struct_utsname
type Ustat_t C.struct_ustat
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_EACCESS = C.AT_EACCESS
)
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfTimeval C.struct_bpf_timeval
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Termio C.struct_termio
type Winsize C.struct_winsize
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)

133
vendor/golang.org/x/text/unicode/bidi/gen.go generated vendored Normal file
View File

@ -0,0 +1,133 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"flag"
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
var outputFile = flag.String("out", "tables.go", "output file")
func main() {
gen.Init()
gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
genTables()
}
// bidiClass names and codes taken from class "bc" in
// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
"AL": AL, // ArabicLetter
"AN": AN, // ArabicNumber
"B": B, // ParagraphSeparator
"BN": BN, // BoundaryNeutral
"CS": CS, // CommonSeparator
"EN": EN, // EuropeanNumber
"ES": ES, // EuropeanSeparator
"ET": ET, // EuropeanTerminator
"L": L, // LeftToRight
"NSM": NSM, // NonspacingMark
"ON": ON, // OtherNeutral
"R": R, // RightToLeft
"S": S, // SegmentSeparator
"WS": WS, // WhiteSpace
"FSI": Control,
"PDF": Control,
"PDI": Control,
"LRE": Control,
"LRI": Control,
"LRO": Control,
"RLE": Control,
"RLI": Control,
"RLO": Control,
}
func genTables() {
if numClass > 0x0F {
log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
}
w := gen.NewCodeWriter()
defer w.WriteVersionedGoFile(*outputFile, "bidi")
gen.WriteUnicodeVersion(w)
t := triegen.NewTrie("bidi")
// Build data about bracket mapping. These bits need to be or-ed with
// any other bits.
orMask := map[rune]uint64{}
xorMap := map[rune]int{}
xorMasks := []rune{0} // First value is no-op.
ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
r1 := p.Rune(0)
r2 := p.Rune(1)
xor := r1 ^ r2
if _, ok := xorMap[xor]; !ok {
xorMap[xor] = len(xorMasks)
xorMasks = append(xorMasks, xor)
}
entry := uint64(xorMap[xor]) << xorMaskShift
switch p.String(2) {
case "o":
entry |= openMask
case "c", "n":
default:
log.Fatalf("Unknown bracket class %q.", p.String(2))
}
orMask[r1] = entry
})
w.WriteComment(`
xorMasks contains masks to be xor-ed with brackets to get the reverse
version.`)
w.WriteVar("xorMasks", xorMasks)
done := map[rune]bool{}
insert := func(r rune, c Class) {
if !done[r] {
t.Insert(r, orMask[r]|uint64(c))
done[r] = true
}
}
// Insert the derived BiDi properties.
ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
r := p.Rune(0)
class, ok := bidiClass[p.String(1)]
if !ok {
log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
}
insert(r, class)
})
visitDefaults(insert)
// TODO: use sparse blocks. This would reduce table size considerably
// from the looks of it.
sz, err := t.Gen(w)
if err != nil {
log.Fatal(err)
}
w.Size += sz
}
// dummy values to make methods in gen_common compile. The real versions
// are generated by this file into tables.go.
var (
xorMasks []rune
)
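// Worked example of the bracket mapping built in genTables (the xorMasks
// index is illustrative): BidiBrackets.txt pairs U+0028 '(' with U+0029 ')',
// so r1^r2 == 0x0001. That xor value is appended to xorMasks (say at index 1)
// and both runes get 1<<xorMaskShift or-ed into their trie entry; '(' is
// additionally or-ed with openMask because its bracket class is "o". At
// lookup time the paired bracket can then be recovered as r ^ xorMasks[1].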

57
vendor/golang.org/x/text/unicode/bidi/gen_ranges.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
// These tables are hand-extracted from:
// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
// first write default values for ranges listed above.
visitRunes(fn, AL, []rune{
0x0600, 0x07BF, // Arabic
0x08A0, 0x08FF, // Arabic Extended-A
0xFB50, 0xFDCF, // Arabic Presentation Forms
0xFDF0, 0xFDFF,
0xFE70, 0xFEFF,
0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
})
visitRunes(fn, R, []rune{
0x0590, 0x05FF, // Hebrew
0x07C0, 0x089F, // Nko et al.
0xFB1D, 0xFB4F,
0x00010800, 0x00010FFF, // Cypriot Syllabary et al.
0x0001E800, 0x0001EDFF,
0x0001EF00, 0x0001EFFF,
})
visitRunes(fn, ET, []rune{ // European Terminator
0x20A0, 0x20CF, // Currency symbols
})
rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
fn(r, BN) // Boundary Neutral
})
ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
if p.String(1) == "Default_Ignorable_Code_Point" {
fn(p.Rune(0), BN) // Boundary Neutral
}
})
}
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
for i := 0; i < len(runes); i += 2 {
lo, hi := runes[i], runes[i+1]
for j := lo; j <= hi; j++ {
fn(j, c)
}
}
}

64
vendor/golang.org/x/text/unicode/bidi/gen_trieval.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
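// A minimal sketch, not part of the upstream file, of how the bit layout
// documented above can be unpacked again; "entry" is assumed to be the low
// byte of the trie value built by gen.go.
func unpackEntry(entry uint8) (c Class, isOpen bool, xorIdx int) {
	c = Class(entry & 0x0F)             // bits 3..0: Class type
	isOpen = entry&openMask != 0        // bit 4: 1 = bracket open, 0 = close
	xorIdx = int(entry >> xorMaskShift) // bits 7..5: index into xorMasks
	return c, isOpen, xorIdx
}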

986
vendor/golang.org/x/text/unicode/norm/maketables.go generated vendored Normal file
View File

@ -0,0 +1,986 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Normalization table generator.
// Data read from the web.
// See forminfo.go for a description of the trie values associated with each rune.
package main
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
func main() {
gen.Init()
loadUnicodeData()
compactCCC()
loadCompositionExclusions()
completeCharFields(FCanonical)
completeCharFields(FCompatibility)
computeNonStarterCounts()
verifyComputed()
printChars()
testDerived()
printTestdata()
makeTables()
}
var (
tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist
// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int
const (
QCUnknown QCResult = iota
QCYes
QCNo
QCMaybe
)
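// To make these values concrete (illustrative, standard Unicode examples):
// a precomposed letter such as U+00E9 'é' is QCNo under NFD because it always
// decomposes, plain ASCII 'e' is QCYes in every form, and a combining mark
// like U+0301 is QCMaybe under NFC since whether it composes depends on the
// preceding starter.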
func (r QCResult) String() string {
switch r {
case QCYes:
return "Yes"
case QCNo:
return "No"
case QCMaybe:
return "Maybe"
}
return "***UNKNOWN***"
}
const (
FCanonical = iota // NFC or NFD
FCompatibility // NFKC or NFKD
FNumberOfFormTypes
)
const (
MComposed = iota // NFC or NFKC
MDecomposed // NFD or NFKD
MNumberOfModes
)
// This contains only the properties we're interested in.
type Char struct {
name string
codePoint rune // if zero, this index is not a valid code point.
ccc uint8 // canonical combining class
origCCC uint8
excludeInComp bool // from CompositionExclusions.txt
compatDecomp bool // it has a compatibility expansion
nTrailingNonStarters uint8
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
state State
}
var chars = make([]Char, MaxChar+1)
var cccMap = make(map[uint8]uint8)
func (c Char) String() string {
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
fmt.Fprintf(buf, " state: %v\n", c.state)
fmt.Fprintf(buf, " NFC:\n")
fmt.Fprint(buf, c.forms[FCanonical])
fmt.Fprintf(buf, " NFKC:\n")
fmt.Fprint(buf, c.forms[FCompatibility])
return buf.String()
}
// In UnicodeData.txt, some ranges are marked like this:
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int
const (
SNormal State = iota // known to be zero for the type
SFirst
SLast
SMissing
)
var lastChar = rune('\u0000')
func (c Char) isValid() bool {
return c.codePoint != 0 && c.state != SMissing
}
type FormInfo struct {
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
combinesForward bool // May combine with rune on the right
combinesBackward bool // May combine with rune on the left
isOneWay bool // Never appears in result
inDecomp bool // Some decompositions result in this char.
decomp Decomposition
expandedDecomp Decomposition
}
func (f FormInfo) String() string {
buf := bytes.NewBuffer(make([]byte, 0))
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
return buf.String()
}
type Decomposition []rune
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
decomp = decomp[1:]
}
for _, d := range decomp {
point, err := strconv.ParseUint(d, 16, 64)
if err != nil {
return a, err
}
a = append(a, rune(point))
}
return a, nil
}
func loadUnicodeData() {
f := gen.OpenUCDFile("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(ucd.CodePoint)
char := &chars[r]
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
decmap := p.String(ucd.DecompMapping)
exp, err := parseDecomposition(decmap, false)
isCompat := false
if err != nil {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
}
char.name = p.String(ucd.Name)
char.codePoint = r
char.forms[FCompatibility].decomp = exp
if !isCompat {
char.forms[FCanonical].decomp = exp
} else {
char.compatDecomp = true
}
if len(decmap) > 0 {
char.forms[FCompatibility].decomp = exp
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
}
// compactCCC converts the sparse set of CCC values to a contiguous one,
// reducing the number of bits needed from 8 to 6.
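// Unicode assigns only a few dozen distinct CCC values (0, 1, 7, 9, 220, 230,
// and so on), so after remapping them to consecutive indices the largest
// index comfortably fits in 6 bits; cccMap keeps the original values for the
// remap table printed later by makeTables.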
func compactCCC() {
m := make(map[uint8]uint8)
for i := range chars {
c := &chars[i]
m[c.ccc] = 0
}
cccs := []int{}
for v := range m {
cccs = append(cccs, int(v))
}
sort.Ints(cccs)
for i, c := range cccs {
cccMap[uint8(i)] = uint8(c)
m[uint8(c)] = uint8(i)
}
for i := range chars {
c := &chars[i]
c.origCCC = c.ccc
c.ccc = m[c.ccc]
}
if len(m) >= 1<<6 {
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
}
}
// CompositionExclusions.txt has form:
// 0958 # ...
// See https://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
f := gen.OpenUCDFile("CompositionExclusions.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if e := p.Err(); e != nil {
log.Fatal(e)
}
}
// hasCompatDecomp returns true if any of the recursive
// decompositions contains a compatibility expansion.
// In this case, the character may not occur in NFK*.
func hasCompatDecomp(r rune) bool {
c := &chars[r]
if c.compatDecomp {
return true
}
for _, d := range c.forms[FCompatibility].decomp {
if hasCompatDecomp(d) {
return true
}
}
return false
}
// Hangul related constants.
const (
HangulBase = 0xAC00
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
JamoLBase = 0x1100
JamoLEnd = 0x1113
JamoVBase = 0x1161
JamoVEnd = 0x1176
JamoTBase = 0x11A8
JamoTEnd = 0x11C3
JamoLVTCount = 19 * 21 * 28
JamoTCount = 28
)
func isHangul(r rune) bool {
return HangulBase <= r && r < HangulEnd
}
func isHangulWithoutJamoT(r rune) bool {
if !isHangul(r) {
return false
}
r -= HangulBase
return r < JamoLVTCount && r%JamoTCount == 0
}
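// The check above relies on the standard Hangul arithmetic
// S = HangulBase + (L*21 + V)*28 + T, where a trailing-jamo index T of zero
// means the syllable is LV-only. For example, U+AC00 (가) has offset 0 and
// thus no trailing jamo, while U+AC01 (각) has offset 1 and does.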
func ccc(r rune) uint8 {
return chars[r].ccc
}
// Insert a rune in a buffer, ordered by Canonical Combining Class.
func insertOrdered(b Decomposition, r rune) Decomposition {
n := len(b)
b = append(b, 0)
cc := ccc(r)
if cc > 0 {
// Use bubble sort.
for ; n > 0; n-- {
if ccc(b[n-1]) <= cc {
break
}
b[n] = b[n-1]
}
}
b[n] = r
return b
}
// Recursively decompose.
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
dcomp := chars[r].forms[form].decomp
if len(dcomp) == 0 {
return insertOrdered(d, r)
}
for _, c := range dcomp {
d = decomposeRecursive(form, c, d)
}
return d
}
func completeCharFields(form int) {
// Phase 0: pre-expand decomposition.
for i := range chars {
f := &chars[i].forms[form]
if len(f.decomp) == 0 {
continue
}
exp := make(Decomposition, 0)
for _, c := range f.decomp {
exp = decomposeRecursive(form, c, exp)
}
f.expandedDecomp = exp
}
// Phase 1: composition exclusion, mark decomposition.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
// Mark script-specific exclusions and version-restricted characters.
f.isOneWay = c.excludeInComp
// Singletons
f.isOneWay = f.isOneWay || len(f.decomp) == 1
// Non-starter decompositions
if len(f.decomp) > 1 {
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
f.isOneWay = f.isOneWay || chk
}
// Runes that decompose into more than two runes.
f.isOneWay = f.isOneWay || len(f.decomp) > 2
if form == FCompatibility {
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
}
for _, r := range f.decomp {
chars[r].forms[form].inDecomp = true
}
}
// Phase 2: forward and backward combining.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
if !f.isOneWay && len(f.decomp) == 2 {
f0 := &chars[f.decomp[0]].forms[form]
f1 := &chars[f.decomp[1]].forms[form]
if !f0.isOneWay {
f0.combinesForward = true
}
if !f1.isOneWay {
f1.combinesBackward = true
}
}
if isHangulWithoutJamoT(rune(i)) {
f.combinesForward = true
}
}
// Phase 3: quick check values.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
switch {
case len(f.decomp) > 0:
f.quickCheck[MDecomposed] = QCNo
case isHangul(rune(i)):
f.quickCheck[MDecomposed] = QCNo
default:
f.quickCheck[MDecomposed] = QCYes
}
switch {
case f.isOneWay:
f.quickCheck[MComposed] = QCNo
case (i & 0xffff00) == JamoLBase:
f.quickCheck[MComposed] = QCYes
if JamoLBase <= i && i < JamoLEnd {
f.combinesForward = true
}
if JamoVBase <= i && i < JamoVEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
f.combinesForward = true
}
if JamoTBase <= i && i < JamoTEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
}
case !f.combinesBackward:
f.quickCheck[MComposed] = QCYes
default:
f.quickCheck[MComposed] = QCMaybe
}
}
}
func computeNonStarterCounts() {
// Phase 4: leading and trailing non-starter count
for i := range chars {
c := &chars[i]
runes := []rune{rune(i)}
// We always use FCompatibility so that the CGJ insertion points do not
// change for repeated normalizations with different forms.
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
runes = exp
}
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, r := range runes {
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nLeadingNonStarters++
}
for i := len(runes) - 1; i >= 0; i-- {
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nTrailingNonStarters++
}
if c.nTrailingNonStarters > 3 {
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
}
if isHangul(rune(i)) {
c.nTrailingNonStarters = 2
if isHangulWithoutJamoT(rune(i)) {
c.nTrailingNonStarters = 1
}
}
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
}
if t := c.nTrailingNonStarters; t > 3 {
log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
}
}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
}
// See forminfo.go for format.
func makeEntry(f *FormInfo, c *Char) uint16 {
e := uint16(0)
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
e |= 0x40
}
if f.combinesForward {
e |= 0x20
}
if f.quickCheck[MDecomposed] == QCNo {
e |= 0x4
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
e |= 0x10
case QCMaybe:
e |= 0x18
default:
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
}
e |= uint16(c.nTrailingNonStarters)
return e
}
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
type decompSet [7]map[string]bool
const (
normalDecomp = iota
firstMulti
firstCCC
endMulti
firstLeadingCCC
firstCCCZeroExcept
firstStarterWithNLead
lastDecomp
)
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
func makeDecompSet() decompSet {
m := decompSet{}
for i := range m {
m[i] = make(map[string]bool)
}
return m
}
func (m *decompSet) insert(key int, s string) {
m[key][s] = true
}
func printCharInfoTables(w io.Writer) int {
mkstr := func(r rune, f *FormInfo) (int, string) {
d := f.expandedDecomp
s := string([]rune(d))
if max := 1 << 6; len(s) >= max {
const msg = "%U: too many bytes in decomposition: %d >= %d"
log.Fatalf(msg, r, len(s), max)
}
head := uint8(len(s))
if f.quickCheck[MComposed] != QCYes {
head |= 0x40
}
if f.combinesForward {
head |= 0x80
}
s = string([]byte{head}) + s
lccc := ccc(d[0])
tccc := ccc(d[len(d)-1])
cc := ccc(r)
if cc != 0 && lccc == 0 && tccc == 0 {
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
}
if tccc < lccc && lccc != 0 {
const msg = "%U: lccc (%d) must be <= tcc (%d)"
log.Fatalf(msg, r, lccc, tccc)
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
nLead := chars[r].nLeadingNonStarters
if tccc > 0 || lccc > 0 || nTrail > 0 {
tccc <<= 2
tccc |= nTrail
s += string([]byte{tccc})
index = endMulti
for _, r := range d[1:] {
if ccc(r) == 0 {
index = firstCCC
}
}
if lccc > 0 || nLead > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
}
index = firstLeadingCCC
}
if cc != lccc {
if cc != 0 {
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
}
index = firstCCCZeroExcept
}
} else if len(d) > 1 {
index = firstMulti
}
return index, s
}
decompSet := makeDecompSet()
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
decompSet.insert(firstStarterWithNLead, nLeadStr)
// Store the uniqued decompositions in a byte buffer,
// preceded by their byte length.
for _, c := range chars {
for _, f := range c.forms {
if len(f.expandedDecomp) == 0 {
continue
}
if f.combinesBackward {
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
}
index, s := mkstr(c.codePoint, &f)
decompSet.insert(index, s)
}
}
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
size := 0
positionMap := make(map[string]uint16)
decompositions.WriteString("\000")
fmt.Fprintln(w, "const (")
for i, m := range decompSet {
sa := []string{}
for s := range m {
sa = append(sa, s)
}
sort.Strings(sa)
for _, s := range sa {
p := decompositions.Len()
decompositions.WriteString(s)
positionMap[s] = uint16(p)
}
if cname[i] != "" {
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
}
}
fmt.Fprintln(w, "maxDecomp = 0x8000")
fmt.Fprintln(w, ")")
b := decompositions.Bytes()
printBytes(w, b, "decomps")
size += len(b)
varnames := []string{"nfc", "nfkc"}
for i := 0; i < FNumberOfFormTypes; i++ {
trie := triegen.NewTrie(varnames[i])
for r, c := range chars {
f := c.forms[i]
d := f.expandedDecomp
if len(d) != 0 {
_, key := mkstr(c.codePoint, &f)
trie.Insert(rune(r), uint64(positionMap[key]))
if c.ccc != ccc(d[0]) {
// We assume the lead ccc of a decomposition !=0 in this case.
if ccc(d[0]) == 0 {
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
}
}
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
// Handle cases where it can't be detected that the nLead should be equal
// to nTrail.
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
trie.Insert(c.codePoint, uint64(0x8000|v))
}
}
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
if err != nil {
log.Fatal(err)
}
size += sz
}
return size
}
func contains(sa []string, s string) bool {
for _, a := range sa {
if a == s {
return true
}
}
return false
}
func makeTables() {
w := &bytes.Buffer{}
size := 0
if *tablelist == "" {
return
}
list := strings.Split(*tablelist, ",")
if *tablelist == "all" {
list = []string{"recomp", "info"}
}
// Compute maximum decomposition size.
max := 0
for _, c := range chars {
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
max = n
}
}
fmt.Fprintln(w, `import "sync"`)
fmt.Fprintln(w)
fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
fmt.Fprintln(w)
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Fprintln(w, ")\n")
// Print the CCC remap table.
size += len(cccMap)
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
for i := 0; i < len(cccMap); i++ {
if i%8 == 0 {
fmt.Fprintln(w)
}
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
}
fmt.Fprintln(w, "\n}\n")
if contains(list, "info") {
size += printCharInfoTables(w)
}
if contains(list, "recomp") {
// Note that we use 32 bit keys, instead of 64 bit.
// This clips the bits of three entries, but we know
// this won't cause a collision. The compiler will catch
// any changes made to UnicodeData.txt that introduces
// a collision.
// Note that the recomposition map for NFC and NFKC
// are identical.
// Recomposition map
nrentries := 0
for _, c := range chars {
f := c.forms[FCanonical]
if !f.isOneWay && len(f.decomp) > 0 {
nrentries++
}
}
sz := nrentries * 8
size += sz
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
fmt.Fprintln(w, "var recompMap map[uint32]rune")
fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
fmt.Fprintln(w, `const recompMapPacked = "" +`)
var buf [8]byte
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
if !f.isOneWay && len(d) > 0 {
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
binary.BigEndian.PutUint32(buf[:4], key)
binary.BigEndian.PutUint32(buf[4:], uint32(i))
fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
}
}
// hack so we don't have to special case the trailing plus sign
fmt.Fprintf(w, ` ""`)
fmt.Fprintln(w)
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
}
func printChars() {
if *verbose {
for _, c := range chars {
if !c.isValid() || c.state == SMissing {
continue
}
fmt.Println(c)
}
}
}
// verifyComputed does various consistency tests.
func verifyComputed() {
for i, c := range chars {
for _, f := range c.forms {
isNo := (f.quickCheck[MDecomposed] == QCNo)
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
}
isMaybe := f.quickCheck[MComposed] == QCMaybe
if f.combinesBackward != isMaybe {
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
}
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
}
if len(f.expandedDecomp) != 0 {
continue
}
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
// We accept these runes to be treated differently (it only affects
// segment breaking in iteration, most likely on improper use), but
// reconsider if more characters are added.
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
}
}
}
nfc := c.forms[FCanonical]
nfkc := c.forms[FCompatibility]
if nfc.combinesBackward != nfkc.combinesBackward {
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
}
}
}
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5 ; NFD_QC; N # ...
// 0374 ; NFD_QC; N # ...
// See https://unicode.org/reports/tr44/ for full explanation
func testDerived() {
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(0)
c := &chars[r]
var ftype, mode int
qt := p.String(1)
switch qt {
case "NFC_QC":
ftype, mode = FCanonical, MComposed
case "NFD_QC":
ftype, mode = FCanonical, MDecomposed
case "NFKC_QC":
ftype, mode = FCompatibility, MComposed
case "NFKD_QC":
ftype, mode = FCompatibility, MDecomposed
default:
continue
}
var qr QCResult
switch p.String(2) {
case "Y":
qr = QCYes
case "N":
qr = QCNo
case "M":
qr = QCMaybe
default:
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {
for j, fd := range c.forms {
for k, qr := range fd.quickCheck {
if !fd.verified[k] && qr != QCYes {
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
log.Printf(m, i, j, k, qr, c.name)
}
}
}
}
}
var testHeader = `const (
Yes = iota
No
Maybe
)
type formData struct {
qc uint8
combinesForward bool
decomposition string
}
type runeData struct {
r rune
ccc uint8
nLead uint8
nTrail uint8
f [2]formData // 0: canonical; 1: compatibility
}
func f(qc uint8, cf bool, dec string) [2]formData {
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}
var testData = []runeData{
`
func printTestdata() {
type lastInfo struct {
ccc uint8
nLead uint8
nTrail uint8
f string
}
last := lastInfo{}
w := &bytes.Buffer{}
fmt.Fprintf(w, testHeader)
for r, c := range chars {
f := c.forms[FCanonical]
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
f = c.forms[FCompatibility]
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
s := ""
if d == dk && qc == qck && cf == cfk {
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
} else {
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
}
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
if last != current {
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
last = current
}
}
fmt.Fprintln(w, "}")
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
}

117
vendor/golang.org/x/text/unicode/norm/triegen.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Trie table generator.
// Used by make*tables tools to generate a go file with trie data structures
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
// sequence are used to lookup offsets in the index table to be used for the
// next byte. The last byte is used to index into a table with 16-bit values.
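//
// A minimal sketch of the two-level lookup the generated tables support; the
// function name, table names, and 64-entry block size are assumptions for
// illustration only, not output of this generator:
//
//	// c0 is a non-final byte of a UTF-8 sequence, c1 its final byte.
//	func lookup2(index, values []uint16, c0, c1 byte) uint16 {
//		block := index[c0]                        // non-final byte selects an index block
//		return values[int(block)<<6|int(c1&0x3f)] // final byte picks the 16-bit value
//	}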
package main
import (
"fmt"
"io"
)
const maxSparseEntries = 16
type normCompacter struct {
sparseBlocks [][]uint64
sparseOffset []uint16
sparseCount int
name string
}
func mostFrequentStride(a []uint64) int {
counts := make(map[int]int)
var v int
for _, x := range a {
if stride := int(x) - v; v != 0 && stride >= 0 {
counts[stride]++
}
v = int(x)
}
var maxs, maxc int
for stride, cnt := range counts {
if cnt > maxc || (cnt == maxc && stride < maxs) {
maxs, maxc = stride, cnt
}
}
return maxs
}
func countSparseEntries(a []uint64) int {
stride := mostFrequentStride(a)
var v, count int
for _, tv := range a {
if int(tv)-v != stride {
if tv != 0 {
count++
}
}
v = int(tv)
}
return count
}
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
if n := countSparseEntries(v); n <= maxSparseEntries {
return (n+1)*4 + 2, true
}
return 0, false
}
func (c *normCompacter) Store(v []uint64) uint32 {
h := uint32(len(c.sparseOffset))
c.sparseBlocks = append(c.sparseBlocks, v)
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
c.sparseCount += countSparseEntries(v) + 1
return h
}
func (c *normCompacter) Handler() string {
return c.name + "Sparse.lookup"
}
func (c *normCompacter) Print(w io.Writer) (retErr error) {
p := func(f string, x ...interface{}) {
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
retErr = err
}
}
ls := len(c.sparseBlocks)
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
ns := c.sparseCount
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
for i, b := range c.sparseBlocks {
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
var v int
stride := mostFrequentStride(b)
n := countSparseEntries(b)
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
for i, nv := range b {
if int(nv)-v != stride {
if v != 0 {
p(",hi:%#02x},", 0x80+i-1)
}
if nv != 0 {
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
}
}
v = int(nv)
}
if v != 0 {
p(",hi:%#02x},", 0x80+len(b)-1)
}
}
p("\n}\n\n")
return
}

99
vendor/golang.org/x/tools/go/gcexportdata/main.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// The gcexportdata command is a diagnostic tool that displays the
// contents of gc export data files.
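//
// Illustrative invocation (the archive path is hypothetical):
//
//	gcexportdata -package fmt /path/to/archive.a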
package main
import (
"flag"
"fmt"
"go/token"
"go/types"
"log"
"os"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/types/typeutil"
)
var packageFlag = flag.String("package", "", "alternative package to print")
func main() {
log.SetPrefix("gcexportdata: ")
log.SetFlags(0)
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
}
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(2)
}
filename := flag.Args()[0]
f, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
r, err := gcexportdata.NewReader(f)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Decode the package.
const primary = "<primary>"
imports := make(map[string]*types.Package)
fset := token.NewFileSet()
pkg, err := gcexportdata.Read(r, fset, imports, primary)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Optionally select an indirectly mentioned package.
if *packageFlag != "" {
pkg = imports[*packageFlag]
if pkg == nil {
fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
filename, *packageFlag)
for p := range imports {
if p != primary {
fmt.Fprintf(os.Stderr, "\t%s\n", p)
}
}
os.Exit(1)
}
}
// Print all package-level declarations, including non-exported ones.
fmt.Printf("package %s\n", pkg.Name())
for _, imp := range pkg.Imports() {
fmt.Printf("import %q\n", imp.Path())
}
qual := func(p *types.Package) string {
if pkg == p {
return ""
}
return p.Name()
}
scope := pkg.Scope()
for _, name := range scope.Names() {
obj := scope.Lookup(name)
fmt.Printf("%s: %s\n",
fset.Position(obj.Pos()),
types.ObjectString(obj, qual))
// For types, print each method.
if _, ok := obj.(*types.TypeName); ok {
for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
fmt.Printf("%s: %s\n",
fset.Position(method.Obj().Pos()),
types.SelectionString(method, qual))
}
}
}
}

173
vendor/golang.org/x/tools/internal/imports/mkindex.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// +build ignore
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command mkindex creates the file "pkgindex.go" containing an index of the Go
// standard library. The file is intended to be built as part of the imports
// package, so that the package may be used in environments where a GOROOT is
// not available (such as App Engine).
package imports
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
)
var (
pkgIndex = make(map[string][]pkg)
exports = make(map[string]map[string]bool)
)
func main() {
// Don't use GOPATH.
ctx := build.Default
ctx.GOPATH = ""
// Populate pkgIndex global from GOROOT.
for _, path := range ctx.SrcDirs() {
f, err := os.Open(path)
if err != nil {
log.Print(err)
continue
}
children, err := f.Readdir(-1)
f.Close()
if err != nil {
log.Print(err)
continue
}
for _, child := range children {
if child.IsDir() {
loadPkg(path, child.Name())
}
}
}
// Populate exports global.
for _, ps := range pkgIndex {
for _, p := range ps {
e := loadExports(p.dir)
if e != nil {
exports[p.dir] = e
}
}
}
// Construct source file.
var buf bytes.Buffer
fmt.Fprint(&buf, pkgIndexHead)
fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex)
fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports)
src := buf.Bytes()
// Replace main.pkg type name with pkg.
src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1)
// Replace actual GOROOT with "/go".
src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1)
// Add some line wrapping.
src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1)
src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1)
var err error
src, err = format.Source(src)
if err != nil {
log.Fatal(err)
}
// Write out source file.
err = ioutil.WriteFile("pkgindex.go", src, 0644)
if err != nil {
log.Fatal(err)
}
}
const pkgIndexHead = `package imports
func init() {
pkgIndexOnce.Do(func() {
pkgIndex.m = pkgIndexMaster
})
loadExports = func(dir string) map[string]bool {
return exportsMaster[dir]
}
}
`
type pkg struct {
importpath string // full pkg import path, e.g. "net/http"
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
}
var fset = token.NewFileSet()
func loadPkg(root, importpath string) {
shortName := path.Base(importpath)
if shortName == "testdata" {
return
}
dir := filepath.Join(root, importpath)
pkgIndex[shortName] = append(pkgIndex[shortName], pkg{
importpath: importpath,
dir: dir,
})
pkgDir, err := os.Open(dir)
if err != nil {
return
}
children, err := pkgDir.Readdir(-1)
pkgDir.Close()
if err != nil {
return
}
for _, child := range children {
name := child.Name()
if name == "" {
continue
}
if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
continue
}
if child.IsDir() {
loadPkg(root, filepath.Join(importpath, name))
}
}
}
func loadExports(dir string) map[string]bool {
exports := make(map[string]bool)
buildPkg, err := build.ImportDir(dir, 0)
if err != nil {
if strings.Contains(err.Error(), "no buildable Go source files in") {
return nil
}
log.Printf("could not import %q: %v", dir, err)
return nil
}
for _, file := range buildPkg.GoFiles {
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
if err != nil {
log.Printf("could not parse %q: %v", file, err)
continue
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
}
}
}
return exports
}

128
vendor/golang.org/x/tools/internal/imports/mkstdlib.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
// +build ignore
// mkstdlib generates the zstdlib.go file, containing the Go standard
// library API symbols. It's baked into the binary to avoid scanning
// GOPATH in the common case.
package main
import (
"bufio"
"bytes"
"fmt"
"go/format"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"sort"
)
func mustOpen(name string) io.Reader {
f, err := os.Open(name)
if err != nil {
log.Fatal(err)
}
return f
}
func api(base string) string {
return filepath.Join(runtime.GOROOT(), "api", base)
}
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true}
func main() {
var buf bytes.Buffer
outf := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
outf("package imports\n")
outf("var stdlib = map[string]map[string]bool{\n")
f := io.MultiReader(
mustOpen(api("go1.txt")),
mustOpen(api("go1.1.txt")),
mustOpen(api("go1.2.txt")),
mustOpen(api("go1.3.txt")),
mustOpen(api("go1.4.txt")),
mustOpen(api("go1.5.txt")),
mustOpen(api("go1.6.txt")),
mustOpen(api("go1.7.txt")),
mustOpen(api("go1.8.txt")),
mustOpen(api("go1.9.txt")),
mustOpen(api("go1.10.txt")),
mustOpen(api("go1.11.txt")),
mustOpen(api("go1.12.txt")),
mustOpen(api("go1.13.txt")),
// The API of the syscall/js package needs to be computed explicitly,
// because it's not included in the GOROOT/api/go1.*.txt files at this time.
syscallJSAPI(),
)
sc := bufio.NewScanner(f)
pkgs := map[string]map[string]bool{
"unsafe": unsafeSyms,
}
paths := []string{"unsafe"}
for sc.Scan() {
l := sc.Text()
if m := sym.FindStringSubmatch(l); m != nil {
path, sym := m[1], m[2]
if _, ok := pkgs[path]; !ok {
pkgs[path] = map[string]bool{}
paths = append(paths, path)
}
pkgs[path][sym] = true
}
}
if err := sc.Err(); err != nil {
log.Fatal(err)
}
sort.Strings(paths)
for _, path := range paths {
outf("\t%q: map[string]bool{\n", path)
pkg := pkgs[path]
var syms []string
for sym := range pkg {
syms = append(syms, sym)
}
sort.Strings(syms)
for _, sym := range syms {
outf("\t\t%q: true,\n", sym)
}
outf("},\n")
}
outf("}\n")
fmtbuf, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666)
if err != nil {
log.Fatal(err)
}
}
// syscallJSAPI returns the API of the syscall/js package.
// It's computed from the contents of $(go env GOROOT)/src/syscall/js.
func syscallJSAPI() io.Reader {
var exeSuffix string
if runtime.GOOS == "windows" {
exeSuffix = ".exe"
}
cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js")
out, err := cmd.Output()
if err != nil {
log.Fatalln(err)
}
return bytes.NewReader(out)
}

3
vendor/gopkg.in/couchbase/gocb.v1/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
*~
.project

202
vendor/gopkg.in/couchbase/gocb.v1/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

39
vendor/gopkg.in/couchbase/gocb.v1/Makefile generated vendored Normal file
View File

@ -0,0 +1,39 @@
devsetup:
go get "github.com/kisielk/errcheck"
go get "golang.org/x/lint/golint"
go get "github.com/gordonklaus/ineffassign"
go get "github.com/client9/misspell/cmd/misspell"
test:
go test ./ ./cbft
fasttest:
go test -short ./ ./cbft
cover:
go test -coverprofile=cover.out ./ ./cbft
checkerrs:
errcheck -blank -asserts -ignoretests ./ ./cbft
checkfmt:
! gofmt -l -d ./ ./cbft 2>&1 | read
checkvet:
go vet
checkiea:
ineffassign ./
ineffassign ./cbft
checkspell:
misspell -error ./
misspell -error ./cbft
lint: checkfmt checkerrs checkvet checkiea checkspell
golint -set_exit_status -min_confidence 0.81 ./
golint -set_exit_status -min_confidence 0.81 ./cbft
check: lint
go test -cover -race ./ ./cbft
.PHONY: all test devsetup fasttest lint cover checkerrs checkfmt checkvet checkiea checkspell check

53
vendor/gopkg.in/couchbase/gocb.v1/README.md generated vendored Normal file
View File

@ -0,0 +1,53 @@
[![GoDoc](https://godoc.org/github.com/couchbase/gocb?status.png)](https://godoc.org/github.com/couchbase/gocb)
# Couchbase Go Client
This is the official Couchbase Go SDK. If you are looking for our
previous unofficial prototype Go client library, please see:
[go-couchbase](http://www.github.com/couchbase/go-couchbase).
The Go SDK library allows you to connect to a Couchbase cluster from
Go. It is written in pure Go, and uses the included gocbcore library to
handle communicating to the cluster over the Couchbase binary
protocol.
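A minimal connection sketch, assuming a cluster reachable at
`couchbase://localhost` and placeholder credentials:

```go
package main

import "gopkg.in/couchbase/gocb.v1"

func main() {
	cluster, err := gocb.Connect("couchbase://localhost")
	if err != nil {
		panic(err)
	}
	// Authenticate with RBAC credentials before opening a bucket.
	if err := cluster.Authenticate(gocb.PasswordAuthenticator{Username: "user", Password: "pass"}); err != nil {
		panic(err)
	}
	bucket, err := cluster.OpenBucket("default", "")
	if err != nil {
		panic(err)
	}
	defer bucket.Close()
	// Use bucket for KV operations and queries here.
}
```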
## Useful Links
### Source
The project source is hosted at [http://github.com/couchbase/gocb](http://github.com/couchbase/gocb).
### Documentation
You can explore our API reference through godoc at [https://godoc.org/github.com/couchbase/gocb](https://godoc.org/github.com/couchbase/gocb).
You can also find documentation for the Go SDK at the Couchbase [Developer Portal](https://developer.couchbase.com/documentation/server/current/sdk/go/start-using-sdk.html).
### Bug Tracker
Issues are tracked on Couchbase's public [issues.couchbase.com](http://www.couchbase.com/issues/browse/GOCBC).
Contact [the site admins](https://issues.couchbase.com/secure/ContactAdministrators!default.jspa)
regarding login or other problems at issues.couchbase.com (officially) or ask
around in [couchbase/discuss on gitter.im](https://gitter.im/couchbase/discuss)
(unofficially).
## Installing
To install the latest stable version, run:
```bash
go get gopkg.in/couchbase/gocb.v1
```
To install the latest developer version, run:
```bash
go get github.com/couchbase/gocb
```
## License
Copyright 2016 Couchbase Inc.
Licensed under the Apache License, Version 2.0.
See
[LICENSE](https://github.com/couchbase/gocb/blob/master/LICENSE)
for further details.

240
vendor/gopkg.in/couchbase/gocb.v1/analyticsingest.go generated vendored Normal file
View File

@ -0,0 +1,240 @@
package gocb
// UNCOMMITTED: This API may change.
import (
"encoding/json"
"errors"
"time"
"github.com/google/uuid"
)
type ingestMethod func(bucket *Bucket, key string, val interface{}) error
// IngestMethodInsert indicates that the Insert function should be used for kv ingest.
func IngestMethodInsert(bucket *Bucket, key string, val interface{}) error {
_, err := bucket.Insert(key, val, 0)
return err
}
// IngestMethodUpsert indicates that the Upsert function should be used for kv ingest.
func IngestMethodUpsert(bucket *Bucket, key string, val interface{}) error {
_, err := bucket.Upsert(key, val, 0)
return err
}
// IngestMethodReplace indicates that the Replace function should be used for kv ingest.
func IngestMethodReplace(bucket *Bucket, key string, val interface{}) error {
_, err := bucket.Replace(key, val, 0, 0)
return err
}
// IdGeneratorFunction is called to create an ID for a document.
type IdGeneratorFunction func(doc interface{}) (string, error)
// DataConverterFunction is called to convert from analytics document format
// to kv document
type DataConverterFunction func(docBytes []byte) (interface{}, error)
// UUIDIdGeneratorFunction is an IdGeneratorFunction that creates a UUID ID for each document.
func UUIDIdGeneratorFunction(doc interface{}) (string, error) {
return uuid.New().String(), nil
}
// PassthroughDataConverterFunction is a DataConverterFunction that unmarshals the JSON document
// bytes passed to it. The returned interface{} holds a map[string]interface{}.
func PassthroughDataConverterFunction(docBytes []byte) (interface{}, error) {
var doc interface{}
err := json.Unmarshal(docBytes, &doc)
if err != nil {
return nil, err
}
return doc, nil
}
// AnalyticsIngestOptions contains the options for an Analytics query to KV ingest.
type AnalyticsIngestOptions struct {
analyticsTimeout time.Duration
ingestMethod ingestMethod
ignoreIngestError bool
idGenerator IdGeneratorFunction
dataConverter DataConverterFunction
kvRetryBehavior QueryRetryBehavior
retryOn []error
}
// DefaultAnalyticsIngestOptions creates a new AnalyticsIngestOptions from a set of defaults.
//
// UNCOMMITTED: This API may change.
func DefaultAnalyticsIngestOptions() *AnalyticsIngestOptions {
return &AnalyticsIngestOptions{
ingestMethod: IngestMethodUpsert,
idGenerator: UUIDIdGeneratorFunction,
dataConverter: PassthroughDataConverterFunction,
ignoreIngestError: true,
kvRetryBehavior: NewQueryDelayRetryBehavior(10, 2, 500*time.Millisecond, QueryExponentialDelayFunction),
retryOn: []error{ErrTmpFail, ErrBusy},
}
}
// AnalyticsTimeout sets the timeout value that will be used for execution of the AnalyticsQuery.
func (ai *AnalyticsIngestOptions) AnalyticsTimeout(timeout time.Duration) *AnalyticsIngestOptions {
ai.analyticsTimeout = timeout
return ai
}
// IngestMethod sets ingestMethod that will be used for KV operations
func (ai *AnalyticsIngestOptions) IngestMethod(method ingestMethod) *AnalyticsIngestOptions {
ai.ingestMethod = method
return ai
}
// IgnoreIngestError sets whether errors will be ignored when performing KV operations
func (ai *AnalyticsIngestOptions) IgnoreIngestError(ignore bool) *AnalyticsIngestOptions {
ai.ignoreIngestError = ignore
return ai
}
// IdGenerator sets the IdGeneratorFunction to use for generation of IDs
func (ai *AnalyticsIngestOptions) IdGenerator(fn IdGeneratorFunction) *AnalyticsIngestOptions {
ai.idGenerator = fn
return ai
}
// DataConverter sets the DataConverterFunction to use for conversion of Analytics documents to
// KV documents.
func (ai *AnalyticsIngestOptions) DataConverter(fn DataConverterFunction) *AnalyticsIngestOptions {
ai.dataConverter = fn
return ai
}
// KVRetryBehavior sets the QueryRetryBehavior to use for retrying KV operations when a temporary
// or overload error occurs.
func (ai *AnalyticsIngestOptions) KVRetryBehavior(behavior QueryRetryBehavior) *AnalyticsIngestOptions {
ai.kvRetryBehavior = behavior
return ai
}
// KVRetryOn sets the errors to perform retries on for kv operation errors.
func (ai *AnalyticsIngestOptions) KVRetryOn(errors []error) *AnalyticsIngestOptions {
ai.retryOn = errors
return ai
}
// AnalyticsIngest executes an Analytics query and inserts/updates/replaces the transformed results into a bucket.
//
// UNCOMMITTED: This API may change.
func (b *Bucket) AnalyticsIngest(analyticsQuery *AnalyticsQuery, analyticsParams []interface{}, opts *AnalyticsIngestOptions) error {
return b.analyticsIngest(new(defaultIngestQueryRunner), analyticsQuery, analyticsParams, opts)
}
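// An illustrative caller-side sketch (the dataset, statement, and option
// values are hypothetical):
//
//	q := gocb.NewAnalyticsQuery("SELECT t.* FROM tweets t")
//	opts := gocb.DefaultAnalyticsIngestOptions().IngestMethod(gocb.IngestMethodInsert)
//	if err := bucket.AnalyticsIngest(q, nil, opts); err != nil {
//		// handle analytics or ingest error
//	}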
func (b *Bucket) analyticsIngest(queryRunner ingestQueryRunner, analyticsQuery *AnalyticsQuery, analyticsParams []interface{}, opts *AnalyticsIngestOptions) error {
if analyticsQuery == nil {
return errors.New("query cannot be nil")
}
if opts == nil {
opts = DefaultAnalyticsIngestOptions()
}
if opts.idGenerator == nil {
opts.idGenerator = UUIDIdGeneratorFunction
}
if opts.dataConverter == nil {
opts.dataConverter = PassthroughDataConverterFunction
}
if opts.ingestMethod == nil {
opts.ingestMethod = IngestMethodUpsert
}
analyticsTimeout := opts.analyticsTimeout
if analyticsTimeout == 0 {
analyticsTimeout = b.AnalyticsTimeout()
}
analyticsQuery.ServerSideTimeout(analyticsTimeout)
qResults, err := queryRunner.ExecuteQuery(b, analyticsQuery, analyticsParams)
if err != nil {
return err
}
for {
qBytes := qResults.NextBytes()
if qBytes == nil {
break
}
converted, err := opts.dataConverter(qBytes)
if err != nil {
if opts.ignoreIngestError {
continue
} else {
return err
}
}
id, err := opts.idGenerator(converted)
if err != nil {
if opts.ignoreIngestError {
continue
} else {
return err
}
}
var retries uint
for {
err = b.ingest(id, converted, opts.ingestMethod)
if err == nil {
break
}
if isRetryableError(err, opts.retryOn) {
if opts.kvRetryBehavior == nil || !opts.kvRetryBehavior.CanRetry(retries) {
break
}
} else {
break
}
retries++
time.Sleep(opts.kvRetryBehavior.NextInterval(retries))
}
if err != nil {
if opts.ignoreIngestError {
continue
} else {
return err
}
}
}
return nil
}
type ingestQueryRunner interface {
ExecuteQuery(bucket *Bucket, query *AnalyticsQuery, params []interface{}) (AnalyticsResults, error)
}
type defaultIngestQueryRunner struct {
}
func (runner *defaultIngestQueryRunner) ExecuteQuery(bucket *Bucket, query *AnalyticsQuery, params []interface{}) (AnalyticsResults, error) {
return bucket.ExecuteAnalyticsQuery(query, params)
}
func (b *Bucket) ingest(key string, converted interface{}, method ingestMethod) error {
err := method(b, key, converted)
if err != nil {
return err
}
return nil
}
func isRetryableError(err error, errors []error) bool {
for _, retryable := range errors {
if err == retryable {
return true
}
}
return false
}

63
vendor/gopkg.in/couchbase/gocb.v1/analyticsquery.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package gocb
import "time"
// AnalyticsQuery represents a pending Analytics query.
type AnalyticsQuery struct {
options map[string]interface{}
}
// NewAnalyticsQuery creates a new AnalyticsQuery object from a query string.
func NewAnalyticsQuery(statement string) *AnalyticsQuery {
nq := &AnalyticsQuery{
options: make(map[string]interface{}),
}
nq.options["statement"] = statement
return nq
}
// ServerSideTimeout indicates the maximum time to wait for this query to complete.
func (aq *AnalyticsQuery) ServerSideTimeout(timeout time.Duration) *AnalyticsQuery {
aq.options["timeout"] = timeout.String()
return aq
}
// Pretty indicates whether the response should be nicely formatted.
func (aq *AnalyticsQuery) Pretty(pretty bool) *AnalyticsQuery {
aq.options["pretty"] = pretty
return aq
}
// ContextId sets the client context id for the request, for use with tracing.
func (aq *AnalyticsQuery) ContextId(clientContextId string) *AnalyticsQuery {
aq.options["client_context_id"] = clientContextId
return aq
}
// RawParam allows specifying custom query options.
func (aq *AnalyticsQuery) RawParam(name string, value interface{}) *AnalyticsQuery {
aq.options[name] = value
return aq
}
// Priority sets whether or not the query should be run with priority status.
func (aq *AnalyticsQuery) Priority(priority bool) *AnalyticsQuery {
if priority {
aq.options["priority"] = -1
} else {
delete(aq.options, "priority")
}
return aq
}
// Deferred sets whether or not the query should be run as a deferred query.
//
// Experimental: This API is subject to change at any time.
func (aq *AnalyticsQuery) Deferred(deferred bool) *AnalyticsQuery {
if deferred {
aq.options["mode"] = "async"
} else {
delete(aq.options, "mode")
}
return aq
}

157
vendor/gopkg.in/couchbase/gocb.v1/auth.go generated vendored Normal file
View File

@ -0,0 +1,157 @@
package gocb
import (
"gopkg.in/couchbase/gocbcore.v7"
)
// UserPassPair represents a username and password pair.
type UserPassPair gocbcore.UserPassPair
type coreAuthWrapper struct {
auth Authenticator
bucketName string
}
// Credentials returns the credentials for a particular service.
func (auth *coreAuthWrapper) Credentials(req gocbcore.AuthCredsRequest) ([]gocbcore.UserPassPair, error) {
creds, err := auth.auth.Credentials(AuthCredsRequest{
Service: ServiceType(req.Service),
Endpoint: req.Endpoint,
Bucket: auth.bucketName,
})
if err != nil {
return nil, err
}
coreCreds := make([]gocbcore.UserPassPair, len(creds))
for credIdx, userPass := range creds {
coreCreds[credIdx] = gocbcore.UserPassPair(userPass)
}
return coreCreds, nil
}
// AuthCredsRequest encapsulates the data for a credential request
// from the new Authenticator interface.
// UNCOMMITTED
type AuthCredsRequest struct {
Service ServiceType
Endpoint string
Bucket string
}
func getSingleCredential(auth Authenticator, req AuthCredsRequest) (UserPassPair, error) {
creds, err := auth.Credentials(req)
if err != nil {
return UserPassPair{}, err
}
if len(creds) != 1 {
return UserPassPair{}, gocbcore.ErrInvalidCredentials
}
return creds[0], nil
}
// Authenticator provides an interface to authenticate to each service. Note that
// only authenticators implemented here are stable, and support for custom
// authenticators is considered volatile.
type Authenticator interface {
Credentials(req AuthCredsRequest) ([]UserPassPair, error)
}
// BucketAuthenticator provides a password for a single bucket.
type BucketAuthenticator struct {
Password string
}
// BucketAuthenticatorMap is a map of bucket name to BucketAuthenticator.
type BucketAuthenticatorMap map[string]BucketAuthenticator
// ClusterAuthenticator implements an Authenticator which uses a list of buckets and passwords.
type ClusterAuthenticator struct {
Buckets BucketAuthenticatorMap
Username string
Password string
}
func (ca ClusterAuthenticator) clusterCreds() []UserPassPair {
var creds []UserPassPair
for bucketName, bucket := range ca.Buckets {
creds = append(creds, UserPassPair{
Username: bucketName,
Password: bucket.Password,
})
}
return creds
}
// Credentials returns the credentials for a particular service.
func (ca ClusterAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
if req.Bucket == "" {
if req.Service == MemdService || req.Service == MgmtService ||
req.Service == CapiService {
return []UserPassPair{{
Username: ca.Username,
Password: ca.Password,
}}, nil
}
return ca.clusterCreds(), nil
}
if bucketAuth, ok := ca.Buckets[req.Bucket]; ok {
return []UserPassPair{{
Username: req.Bucket,
Password: bucketAuth.Password,
}}, nil
}
return []UserPassPair{{
Username: "",
Password: "",
}}, nil
}
// PasswordAuthenticator implements an Authenticator which uses an RBAC username and password.
type PasswordAuthenticator struct {
Username string
Password string
}
// Credentials returns the credentials for a particular service.
func (ra PasswordAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
return []UserPassPair{{
Username: ra.Username,
Password: ra.Password,
}}, nil
}
type certAuthenticator interface {
isTlsAuth() bool
}
// CertAuthenticator implements an Authenticator which can be used with certificate authentication.
type CertAuthenticator struct {
}
// Credentials returns the credentials for a particular service.
func (ca CertAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
return []UserPassPair{{
Username: "",
Password: "",
}}, nil
}
func (ca CertAuthenticator) isTlsAuth() bool {
return true
}
// CertificateAuthenticator is included for backwards compatibility only.
// Deprecated: Use CertAuthenticator instead.
type CertificateAuthenticator struct {
CertAuthenticator
}
func (ca CertificateAuthenticator) isTlsAuth() bool {
return true
}

244
vendor/gopkg.in/couchbase/gocb.v1/bucket.go generated vendored Normal file
View File

@ -0,0 +1,244 @@
package gocb
import (
"math/rand"
"time"
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
)
// Bucket represents a single bucket within a cluster.
type Bucket struct {
cluster *Cluster
name string
password string
client *gocbcore.Agent
mtEnabled bool
tracer opentracing.Tracer
transcoder Transcoder
opTimeout time.Duration
bulkOpTimeout time.Duration
duraTimeout time.Duration
duraPollTimeout time.Duration
viewTimeout time.Duration
n1qlTimeout time.Duration
ftsTimeout time.Duration
analyticsTimeout time.Duration
internal *BucketInternal
analyticsQueryRetryBehavior QueryRetryBehavior
searchQueryRetryBehavior QueryRetryBehavior
}
func (b *Bucket) startKvOpTrace(operationName string) opentracing.Span {
return b.tracer.StartSpan(operationName,
opentracing.Tag{Key: "couchbase.bucket", Value: b.name},
opentracing.Tag{Key: "couchbase.service", Value: "kv"})
}
func createBucket(cluster *Cluster, config *gocbcore.AgentConfig) (*Bucket, error) {
cli, err := gocbcore.CreateAgent(config)
if err != nil {
return nil, err
}
bucket := &Bucket{
cluster: cluster,
name: config.BucketName,
password: config.Password,
client: cli,
mtEnabled: config.UseMutationTokens,
transcoder: &DefaultTranscoder{},
tracer: config.Tracer,
opTimeout: 2500 * time.Millisecond,
bulkOpTimeout: 10000 * time.Millisecond,
duraTimeout: 40000 * time.Millisecond,
duraPollTimeout: 100 * time.Millisecond,
viewTimeout: 75 * time.Second,
n1qlTimeout: 75 * time.Second,
ftsTimeout: 75 * time.Second,
analyticsTimeout: 75 * time.Second,
analyticsQueryRetryBehavior: NewQueryDelayRetryBehavior(10, 2, 500*time.Millisecond, QueryExponentialDelayFunction),
searchQueryRetryBehavior: NewQueryDelayRetryBehavior(10, 2, 500*time.Millisecond, QueryExponentialDelayFunction),
}
bucket.internal = &BucketInternal{
b: bucket,
}
return bucket, nil
}
// Name returns the name of the bucket we are connected to.
func (b *Bucket) Name() string {
return b.name
}
// UUID returns the uuid of the bucket we are connected to.
func (b *Bucket) UUID() string {
return b.client.BucketUUID()
}
// OperationTimeout returns the maximum amount of time to wait for an operation to succeed.
func (b *Bucket) OperationTimeout() time.Duration {
return b.opTimeout
}
// SetOperationTimeout sets the maximum amount of time to wait for an operation to succeed.
func (b *Bucket) SetOperationTimeout(timeout time.Duration) {
b.opTimeout = timeout
}
// BulkOperationTimeout returns the maximum amount of time to wait for a bulk op to succeed.
func (b *Bucket) BulkOperationTimeout() time.Duration {
return b.bulkOpTimeout
}
// SetBulkOperationTimeout sets the maximum amount of time to wait for a bulk op to succeed.
func (b *Bucket) SetBulkOperationTimeout(timeout time.Duration) {
b.bulkOpTimeout = timeout
}
// DurabilityTimeout returns the maximum amount of time to wait for durability to succeed.
func (b *Bucket) DurabilityTimeout() time.Duration {
return b.duraTimeout
}
// SetDurabilityTimeout sets the maximum amount of time to wait for durability to succeed.
func (b *Bucket) SetDurabilityTimeout(timeout time.Duration) {
b.duraTimeout = timeout
}
// DurabilityPollTimeout returns the amount of time waiting between durability polls.
func (b *Bucket) DurabilityPollTimeout() time.Duration {
return b.duraPollTimeout
}
// SetDurabilityPollTimeout sets the amount of time waiting between durability polls.
func (b *Bucket) SetDurabilityPollTimeout(timeout time.Duration) {
b.duraPollTimeout = timeout
}
// SetSearchQueryRetryBehavior sets the retry behavior to use for retrying queries.
func (b *Bucket) SetSearchQueryRetryBehavior(retryBehavior QueryRetryBehavior) {
b.searchQueryRetryBehavior = retryBehavior
}
// SetAnalyticsQueryRetryBehavior sets the retry behavior to use for retrying queries.
func (b *Bucket) SetAnalyticsQueryRetryBehavior(retryBehavior QueryRetryBehavior) {
b.analyticsQueryRetryBehavior = retryBehavior
}
// ViewTimeout returns the maximum amount of time to wait for a view query to complete.
func (b *Bucket) ViewTimeout() time.Duration {
return b.viewTimeout
}
// SetViewTimeout sets the maximum amount of time to wait for a view query to complete.
func (b *Bucket) SetViewTimeout(timeout time.Duration) {
b.viewTimeout = timeout
}
// N1qlTimeout returns the maximum amount of time to wait for a N1QL query to complete.
func (b *Bucket) N1qlTimeout() time.Duration {
return b.n1qlTimeout
}
// SetN1qlTimeout sets the maximum amount of time to wait for a N1QL query to complete.
func (b *Bucket) SetN1qlTimeout(timeout time.Duration) {
b.n1qlTimeout = timeout
}
// AnalyticsTimeout returns the maximum amount of time to wait for an Analytics query to complete.
func (b *Bucket) AnalyticsTimeout() time.Duration {
return b.analyticsTimeout
}
// SetAnalyticsTimeout sets the maximum amount of time to wait for an Analytics query to complete.
func (b *Bucket) SetAnalyticsTimeout(timeout time.Duration) {
b.analyticsTimeout = timeout
}
// SetTranscoder specifies a Transcoder to use when translating documents from their
// raw byte format to Go types and back.
func (b *Bucket) SetTranscoder(transcoder Transcoder) {
b.transcoder = transcoder
}
// InvalidateQueryCache forces the internal cache of prepared queries to be cleared.
// Queries to be cached are controlled by the Adhoc() method of N1qlQuery.
func (b *Bucket) InvalidateQueryCache() {
b.cluster.InvalidateQueryCache()
}
// Cas represents the specific state of a document on the cluster.
type Cas gocbcore.Cas
type pendingOp gocbcore.PendingOp
func (b *Bucket) getViewEp() (string, error) {
capiEps := b.client.CapiEps()
if len(capiEps) == 0 {
return "", &clientError{"No available view nodes."}
}
return capiEps[rand.Intn(len(capiEps))], nil
}
func (b *Bucket) getMgmtEp() (string, error) {
mgmtEps := b.client.MgmtEps()
if len(mgmtEps) == 0 {
return "", &clientError{"No available management nodes."}
}
return mgmtEps[rand.Intn(len(mgmtEps))], nil
}
func (b *Bucket) getN1qlEp() (string, error) {
n1qlEps := b.client.N1qlEps()
if len(n1qlEps) == 0 {
return "", &clientError{"No available N1QL nodes."}
}
return n1qlEps[rand.Intn(len(n1qlEps))], nil
}
func (b *Bucket) getCbasEp() (string, error) {
cbasEps := b.client.CbasEps()
if len(cbasEps) == 0 {
return "", &clientError{"No available Analytics nodes."}
}
return cbasEps[rand.Intn(len(cbasEps))], nil
}
func (b *Bucket) getFtsEp() (string, error) {
ftsEps := b.client.FtsEps()
if len(ftsEps) == 0 {
return "", &clientError{"No available FTS nodes."}
}
return ftsEps[rand.Intn(len(ftsEps))], nil
}
// Close the instance's underlying socket resources. Note that operations pending on the connection may fail.
func (b *Bucket) Close() error {
b.cluster.closeBucket(b)
return b.client.Close()
}
// IoRouter returns the underlying gocbcore agent managing connections.
func (b *Bucket) IoRouter() *gocbcore.Agent {
return b.client
}
// Internal methods, not safe to be consumed by third parties.
func (b *Bucket) Internal() *BucketInternal {
return b.internal
}
// Manager returns a BucketManager for performing management operations on this bucket.
func (b *Bucket) Manager(username, password string) *BucketManager {
return &BucketManager{
bucket: b,
username: username,
password: password,
}
}

View File

@ -0,0 +1,15 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
)
// ExecuteAnalyticsQuery performs an analytics query and returns a list of rows or an error.
func (b *Bucket) ExecuteAnalyticsQuery(q *AnalyticsQuery, params interface{}) (AnalyticsResults, error) {
span := b.tracer.StartSpan("ExecuteAnalyticsQuery",
opentracing.Tag{Key: "couchbase.service", Value: "fts"})
span.SetTag("bucket_name", b.name)
defer span.Finish()
return b.cluster.doAnalyticsQuery(span.Context(), b, q, params)
}

552
vendor/gopkg.in/couchbase/gocb.v1/bucket_crud.go generated vendored Normal file
View File

@ -0,0 +1,552 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
)
// Get retrieves a document from the bucket
func (b *Bucket) Get(key string, valuePtr interface{}) (Cas, error) {
span := b.startKvOpTrace("Get")
defer span.Finish()
return b.get(span.Context(), key, valuePtr)
}
// GetAndTouch retrieves a document and simultaneously updates its expiry time.
func (b *Bucket) GetAndTouch(key string, expiry uint32, valuePtr interface{}) (Cas, error) {
span := b.startKvOpTrace("GetAndTouch")
defer span.Finish()
return b.getAndTouch(span.Context(), key, expiry, valuePtr)
}
// GetAndLock locks a document for a period of time, providing exclusive RW access to it.
func (b *Bucket) GetAndLock(key string, lockTime uint32, valuePtr interface{}) (Cas, error) {
span := b.startKvOpTrace("GetAndLock")
defer span.Finish()
return b.getAndLock(span.Context(), key, lockTime, valuePtr)
}
// Unlock unlocks a document which was locked with GetAndLock.
func (b *Bucket) Unlock(key string, cas Cas) (Cas, error) {
span := b.startKvOpTrace("Unlock")
defer span.Finish()
cas, _, err := b.unlock(span.Context(), key, cas)
return cas, err
}
// GetReplica returns the value of a particular document from a replica server.
func (b *Bucket) GetReplica(key string, valuePtr interface{}, replicaIdx int) (Cas, error) {
span := b.startKvOpTrace("GetReplica")
defer span.Finish()
cas, err := b.getReplica(span.Context(), key, valuePtr, replicaIdx)
return cas, err
}
// Touch touches a document, specifying a new expiry time for it.
// The Cas value must be 0.
func (b *Bucket) Touch(key string, cas Cas, expiry uint32) (Cas, error) {
span := b.startKvOpTrace("Touch")
defer span.Finish()
if cas != 0 {
return 0, ErrNonZeroCas
}
cas, _, err := b.touch(span.Context(), key, expiry)
return cas, err
}
// Remove removes a document from the bucket.
func (b *Bucket) Remove(key string, cas Cas) (Cas, error) {
span := b.startKvOpTrace("Remove")
defer span.Finish()
cas, _, err := b.remove(span.Context(), key, cas)
return cas, err
}
// Upsert inserts or replaces a document in the bucket.
func (b *Bucket) Upsert(key string, value interface{}, expiry uint32) (Cas, error) {
span := b.startKvOpTrace("Upsert")
defer span.Finish()
cas, _, err := b.upsert(span.Context(), key, value, expiry)
return cas, err
}
// Insert inserts a new document to the bucket.
func (b *Bucket) Insert(key string, value interface{}, expiry uint32) (Cas, error) {
span := b.startKvOpTrace("Insert")
defer span.Finish()
cas, _, err := b.insert(span.Context(), key, value, expiry)
return cas, err
}
// Replace replaces a document in the bucket.
func (b *Bucket) Replace(key string, value interface{}, cas Cas, expiry uint32) (Cas, error) {
span := b.startKvOpTrace("Replace")
defer span.Finish()
cas, _, err := b.replace(span.Context(), key, value, cas, expiry)
return cas, err
}
// Append appends a string value to a document.
func (b *Bucket) Append(key, value string) (Cas, error) {
span := b.startKvOpTrace("Append")
defer span.Finish()
cas, _, err := b.append(span.Context(), key, value)
return cas, err
}
// Prepend prepends a string value to a document.
func (b *Bucket) Prepend(key, value string) (Cas, error) {
span := b.startKvOpTrace("Prepend")
defer span.Finish()
cas, _, err := b.prepend(span.Context(), key, value)
return cas, err
}
// Counter performs an atomic addition or subtraction for an integer document. Passing a
// non-negative `initial` value will cause the document to be created if it did not
// already exist.
func (b *Bucket) Counter(key string, delta, initial int64, expiry uint32) (uint64, Cas, error) {
span := b.startKvOpTrace("Counter")
defer span.Finish()
val, cas, _, err := b.counter(span.Context(), key, delta, initial, expiry)
return val, cas, err
}
// ServerStats is a tree of statistics information returned from the server.
// stats := cb.Stats(...)
// for server := stats {
// for statName, stat := server {
// //...
// }
// }
type ServerStats map[string]map[string]string
// Stats returns various server statistics from the cluster.
func (b *Bucket) Stats(key string) (ServerStats, error) {
span := b.startKvOpTrace("Stats")
defer span.Finish()
stats, err := b.stats(span.Context(), key)
return stats, err
}
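
The exported wrappers above all follow the same shape: start a KV trace span, delegate to the lower-case implementation, and surface the CAS. A short sketch of driving them, assuming an already opened *gocb.Bucket (connection code omitted) and an illustrative document ID:

package example

import (
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

type profile struct {
	Name  string `json:"name"`
	Email string `json:"email"`
}

func crudExample(b *gocb.Bucket) {
	// Upsert creates or replaces the document and returns its new CAS.
	cas, err := b.Upsert("user::100", profile{Name: "Ada", Email: "ada@example.com"}, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Get decodes the stored value back into the struct.
	var p profile
	if _, err := b.Get("user::100", &p); err != nil {
		log.Fatal(err)
	}

	// Replace succeeds only while the CAS still matches (optimistic locking).
	p.Email = "ada@example.org"
	if _, err := b.Replace("user::100", p, cas, 0); err != nil {
		log.Fatal(err)
	}

	// Counter atomically adjusts an integer document, creating it at 0 if missing.
	if _, _, err := b.Counter("user::100::logins", 1, 0, 0); err != nil {
		log.Fatal(err)
	}
}
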
type opManager struct {
b *Bucket
signal chan error
tracectx opentracing.SpanContext
}
func (ctrl *opManager) Resolve(err error) {
ctrl.signal <- err
}
func (ctrl *opManager) Wait(op gocbcore.PendingOp, err error) error {
if err != nil {
return err
}
timeoutTmr := gocbcore.AcquireTimer(ctrl.b.opTimeout)
select {
case err = <-ctrl.signal:
gocbcore.ReleaseTimer(timeoutTmr, false)
return err
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(timeoutTmr, true)
if !op.Cancel() {
err = <-ctrl.signal
return err
}
return ErrTimeout
}
}
func (ctrl *opManager) Decode(bytes []byte, flags uint32, valuePtr interface{}) error {
dspan := ctrl.b.tracer.StartSpan("decode",
opentracing.ChildOf(ctrl.tracectx))
err := ctrl.b.transcoder.Decode(bytes, flags, valuePtr)
dspan.Finish()
return err
}
func (ctrl *opManager) Encode(value interface{}) ([]byte, uint32, error) {
espan := ctrl.b.tracer.StartSpan("encode",
opentracing.ChildOf(ctrl.tracectx))
bytes, flags, err := ctrl.b.transcoder.Encode(value)
espan.Finish()
return bytes, flags, err
}
func (b *Bucket) newOpManager(tracectx opentracing.SpanContext) *opManager {
return &opManager{
b: b,
signal: make(chan error, 1),
tracectx: tracectx,
}
}
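
opManager is what turns gocbcore's callback-driven client into the blocking methods above: each operation gets a one-slot error channel, the completion callback calls Resolve, and Wait races that channel against the bucket's operation timeout, cancelling the pending op if the timer fires first. A stripped-down sketch of the same pattern using only the standard library (the names here are illustrative, not part of gocb):

package example

import (
	"errors"
	"time"
)

var errTimeout = errors.New("operation timed out")

// pendingOp is the minimal surface the wait loop needs: a way to cancel an
// in-flight asynchronous operation.
type pendingOp interface {
	Cancel() bool
}

// waitForOp blocks until the async callback resolves the signal channel or
// the timeout fires, mirroring opManager.Wait above.
func waitForOp(op pendingOp, signal chan error, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	select {
	case err := <-signal:
		return err
	case <-timer.C:
		if !op.Cancel() {
			// The operation completed while we were timing out; its result
			// is already on the channel, so return that instead.
			return <-signal
		}
		return errTimeout
	}
}
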
func (b *Bucket) get(tracectx opentracing.SpanContext, key string, valuePtr interface{}) (casOut Cas, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.GetEx(gocbcore.GetOptions{
Key: []byte(key),
TraceContext: tracectx,
}, func(res *gocbcore.GetResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
if err == nil {
err = ctrl.Decode(res.Value, res.Flags, valuePtr)
}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, err
}
return
}
func (b *Bucket) getAndTouch(tracectx opentracing.SpanContext, key string, expiry uint32, valuePtr interface{}) (casOut Cas, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.GetAndTouchEx(gocbcore.GetAndTouchOptions{
Key: []byte(key),
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.GetAndTouchResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
if err == nil {
err = ctrl.Decode(res.Value, res.Flags, valuePtr)
}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, err
}
return
}
func (b *Bucket) getAndLock(tracectx opentracing.SpanContext, key string, lockTime uint32, valuePtr interface{}) (casOut Cas, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.GetAndLockEx(gocbcore.GetAndLockOptions{
Key: []byte(key),
LockTime: lockTime,
TraceContext: tracectx,
}, func(res *gocbcore.GetAndLockResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
if err == nil {
err = ctrl.Decode(res.Value, res.Flags, valuePtr)
}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, err
}
return
}
func (b *Bucket) unlock(tracectx opentracing.SpanContext, key string, cas Cas) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.UnlockEx(gocbcore.UnlockOptions{
Key: []byte(key),
Cas: gocbcore.Cas(cas),
TraceContext: tracectx,
}, func(res *gocbcore.UnlockResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) getReplica(tracectx opentracing.SpanContext, key string, valuePtr interface{}, replicaIdx int) (casOut Cas, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.GetReplicaEx(gocbcore.GetReplicaOptions{
Key: []byte(key),
ReplicaIdx: replicaIdx,
TraceContext: tracectx,
}, func(res *gocbcore.GetReplicaResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
if err == nil {
err = ctrl.Decode(res.Value, res.Flags, valuePtr)
}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, err
}
return
}
func (b *Bucket) touch(tracectx opentracing.SpanContext, key string, expiry uint32) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.TouchEx(gocbcore.TouchOptions{
Key: []byte(key),
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.TouchResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) remove(tracectx opentracing.SpanContext, key string, cas Cas) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.DeleteEx(gocbcore.DeleteOptions{
Key: []byte(key),
Cas: gocbcore.Cas(cas),
TraceContext: tracectx,
}, func(res *gocbcore.DeleteResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) upsert(tracectx opentracing.SpanContext, key string, value interface{}, expiry uint32) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
bytes, flags, err := ctrl.Encode(value)
if err != nil {
return 0, MutationToken{}, err
}
err = ctrl.Wait(b.client.SetEx(gocbcore.SetOptions{
Key: []byte(key),
Value: bytes,
Flags: flags,
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.StoreResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) insert(tracectx opentracing.SpanContext, key string, value interface{}, expiry uint32) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
bytes, flags, err := ctrl.Encode(value)
if err != nil {
return 0, MutationToken{}, err
}
err = ctrl.Wait(b.client.AddEx(gocbcore.AddOptions{
Key: []byte(key),
Value: bytes,
Flags: flags,
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.StoreResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) replace(tracectx opentracing.SpanContext, key string, value interface{}, cas Cas, expiry uint32) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
bytes, flags, err := ctrl.Encode(value)
if err != nil {
return 0, MutationToken{}, err
}
err = ctrl.Wait(b.client.ReplaceEx(gocbcore.ReplaceOptions{
Key: []byte(key),
Cas: gocbcore.Cas(cas),
Value: bytes,
Flags: flags,
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.StoreResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) append(tracectx opentracing.SpanContext, key, value string) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.AppendEx(gocbcore.AdjoinOptions{
Key: []byte(key),
Value: []byte(value),
TraceContext: tracectx,
}, func(res *gocbcore.AdjoinResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) prepend(tracectx opentracing.SpanContext, key, value string) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.PrependEx(gocbcore.AdjoinOptions{
Key: []byte(key),
Value: []byte(value),
TraceContext: tracectx,
}, func(res *gocbcore.AdjoinResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) counterInc(tracectx opentracing.SpanContext, key string, delta, initial uint64, expiry uint32) (valueOut uint64, casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.IncrementEx(gocbcore.CounterOptions{
Key: []byte(key),
Delta: delta,
Initial: initial,
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.CounterResult, err error) {
if res != nil {
valueOut = res.Value
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, 0, MutationToken{}, err
}
return
}
func (b *Bucket) counterDec(tracectx opentracing.SpanContext, key string, delta, initial uint64, expiry uint32) (valueOut uint64, casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.DecrementEx(gocbcore.CounterOptions{
Key: []byte(key),
Delta: delta,
Initial: initial,
Expiry: expiry,
TraceContext: tracectx,
}, func(res *gocbcore.CounterResult, err error) {
if res != nil {
valueOut = res.Value
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, 0, MutationToken{}, err
}
return
}
func (b *Bucket) counter(tracectx opentracing.SpanContext, key string, delta, initial int64, expiry uint32) (uint64, Cas, MutationToken, error) {
realInitial := uint64(0xFFFFFFFFFFFFFFFF)
if initial >= 0 {
realInitial = uint64(initial)
}
if delta > 0 {
return b.counterInc(tracectx, key, uint64(delta), realInitial, expiry)
} else if delta < 0 {
return b.counterDec(tracectx, key, uint64(-delta), realInitial, expiry)
} else {
return 0, 0, MutationToken{}, clientError{"Delta must be a non-zero value."}
}
}
func (b *Bucket) stats(tracectx opentracing.SpanContext, key string) (statsOut ServerStats, errOut error) {
ctrl := b.newOpManager(tracectx)
statsOut = make(ServerStats)
err := ctrl.Wait(b.client.StatsEx(gocbcore.StatsOptions{
Key: key,
TraceContext: tracectx,
}, func(res *gocbcore.StatsResult, err error) {
if res != nil {
for curServer, curStats := range res.Servers {
if curStats.Error != nil && err == nil {
err = curStats.Error
}
statsOut[curServer] = curStats.Stats
}
}
ctrl.Resolve(err)
}))
if err != nil {
return nil, err
}
return
}

112
vendor/gopkg.in/couchbase/gocb.v1/bucket_diag.go generated vendored Normal file

@ -0,0 +1,112 @@
package gocb
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// DiagConnState represents the state of a connection in a diagnostics report.
type DiagConnState int
const (
// DiagStateOk indicates that the connection state is ok.
DiagStateOk = DiagConnState(0)
// DiagStateDisconnected indicates that the connection is disconnected.
DiagStateDisconnected = DiagConnState(1)
)
func diagStateString(state DiagConnState) string {
switch state {
case DiagStateOk:
return "ok"
case DiagStateDisconnected:
return "disconnected"
}
return "?"
}
// DiagnosticEntry represents a single entry in a diagnostics report.
type DiagnosticEntry struct {
Service ServiceType
State DiagConnState
LocalAddr string
RemoteAddr string
LastActivity time.Time
}
// DiagnosticReport encapsulates the results of a Diagnostics operation.
type DiagnosticReport struct {
ConfigRev int64
Services []DiagnosticEntry
}
type jsonDiagnosticEntry struct {
State string `json:"state"`
Remote string `json:"remote"`
Local string `json:"local"`
LastActivityUs uint64 `json:"last_activity_us"`
}
type jsonDiagnosticReport struct {
Version int `json:"version"`
Id string `json:"id"`
ConfigRev int `json:"config_rev"`
Sdk string `json:"sdk"`
Services map[string][]jsonDiagnosticEntry `json:"services"`
}
// MarshalJSON generates a JSON representation of this diagnostics report.
func (report *DiagnosticReport) MarshalJSON() ([]byte, error) {
jsonReport := jsonDiagnosticReport{
Version: 1,
Id: uuid.New().String(),
Services: make(map[string][]jsonDiagnosticEntry),
}
for _, service := range report.Services {
serviceStr := diagServiceString(service.Service)
stateStr := diagStateString(service.State)
jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonDiagnosticEntry{
State: stateStr,
Remote: service.RemoteAddr,
Local: service.LocalAddr,
LastActivityUs: uint64(time.Now().Sub(service.LastActivity).Nanoseconds()),
})
}
return json.Marshal(&jsonReport)
}
// Diagnostics returns information about the internal state of the SDK.
//
// Experimental: This API is subject to change at any time.
func (bucket *Bucket) Diagnostics() (*DiagnosticReport, error) {
agentReport, err := bucket.client.Diagnostics()
if err != nil {
return nil, err
}
report := &DiagnosticReport{
ConfigRev: agentReport.ConfigRev,
}
for _, conn := range agentReport.MemdConns {
state := DiagStateDisconnected
if conn.LocalAddr != "" {
state = DiagStateOk
}
report.Services = append(report.Services, DiagnosticEntry{
Service: MemdService,
State: state,
LocalAddr: conn.LocalAddr,
RemoteAddr: conn.RemoteAddr,
LastActivity: conn.LastActivity,
})
}
return report, nil
}
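
A small sketch of consuming the diagnostics report, assuming an open *gocb.Bucket; the JSON output goes through the MarshalJSON method defined above:

package example

import (
	"encoding/json"
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func dumpDiagnostics(b *gocb.Bucket) {
	report, err := b.Diagnostics()
	if err != nil {
		log.Fatal(err)
	}

	// Marshal uses the versioned report format built by MarshalJSON above.
	out, err := json.Marshal(report)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
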

295
vendor/gopkg.in/couchbase/gocb.v1/bucket_ds.go generated vendored Normal file

@ -0,0 +1,295 @@
package gocb
import "fmt"
// MapGet retrieves a single item from a map document by its key.
func (b *Bucket) MapGet(key, path string, valuePtr interface{}) (Cas, error) {
tracespan := b.startKvOpTrace("MapGet")
defer tracespan.Finish()
frag, err := b.startLookupIn("", key, 0).Get(path).execute(tracespan.Context())
if err != nil {
return 0, err
}
err = frag.ContentByIndex(0, valuePtr)
if err != nil {
return 0, err
}
return frag.Cas(), nil
}
// MapRemove removes a specified key from the specified map document.
func (b *Bucket) MapRemove(key, path string) (Cas, error) {
tracespan := b.startKvOpTrace("MapRemove")
defer tracespan.Finish()
frag, err := b.startMutateIn("", key, 0, 0, 0, 0, 0).Remove(path).execute(tracespan.Context())
if err != nil {
return 0, err
}
return frag.Cas(), nil
}
// MapSize returns the current number of items in a map document.
// PERFORMANCE NOTICE: This currently performs a full document fetch...
func (b *Bucket) MapSize(key string) (uint, Cas, error) {
var mapContents map[string]interface{}
cas, err := b.Get(key, &mapContents)
if err != nil {
return 0, 0, err
}
return uint(len(mapContents)), cas, nil
}
// MapAdd inserts an item to a map document.
func (b *Bucket) MapAdd(key, path string, value interface{}, createMap bool) (Cas, error) {
for {
frag, err := b.startMutateIn("MapAdd", key, 0, 0, 0, 0, 0).
Insert(path, value, false).Execute()
if err != nil {
if IsKeyNotFoundError(err) && createMap {
data := make(map[string]interface{})
data[path] = value
cas, err := b.Insert(key, data, 0)
if err != nil {
if IsKeyExistsError(err) {
continue
}
return 0, err
}
return cas, nil
}
return 0, err
}
return frag.Cas(), nil
}
}
// ListGet retrieves an item from a list document by index.
func (b *Bucket) ListGet(key string, index uint, valuePtr interface{}) (Cas, error) {
frag, err := b.LookupIn(key).Get(fmt.Sprintf("[%d]", index)).Execute()
if err != nil {
return 0, err
}
err = frag.ContentByIndex(0, valuePtr)
if err != nil {
return 0, err
}
return frag.Cas(), nil
}
// ListAppend inserts an item to the end of a list document.
func (b *Bucket) ListAppend(key string, value interface{}, createList bool) (Cas, error) {
for {
frag, err := b.MutateIn(key, 0, 0).ArrayAppend("", value, false).Execute()
if err != nil {
if IsKeyNotFoundError(err) && createList {
var data []interface{}
data = append(data, value)
cas, err := b.Insert(key, data, 0)
if err != nil {
if IsKeyExistsError(err) {
continue
}
return 0, err
}
return cas, nil
}
return 0, err
}
return frag.Cas(), nil
}
}
// ListPrepend inserts an item to the beginning of a list document.
func (b *Bucket) ListPrepend(key string, value interface{}, createList bool) (Cas, error) {
for {
frag, err := b.MutateIn(key, 0, 0).ArrayPrepend("", value, false).Execute()
if err != nil {
if IsKeyNotFoundError(err) && createList {
var data []interface{}
data = append(data, value)
cas, err := b.Insert(key, data, 0)
if err != nil {
if IsKeyExistsError(err) {
continue
}
return 0, err
}
return cas, nil
}
return 0, err
}
return frag.Cas(), nil
}
}
// ListRemove removes an item from a list document by its index.
func (b *Bucket) ListRemove(key string, index uint) (Cas, error) {
frag, err := b.MutateIn(key, 0, 0).Remove(fmt.Sprintf("[%d]", index)).Execute()
if err != nil {
return 0, err
}
return frag.Cas(), nil
}
// ListSet replaces the item at a particular index of a list document.
func (b *Bucket) ListSet(key string, index uint, value interface{}) (Cas, error) {
frag, err := b.MutateIn(key, 0, 0).Replace(fmt.Sprintf("[%d]", index), value).Execute()
if err != nil {
return 0, err
}
return frag.Cas(), nil
}
// ListSize returns the current number of items in a list.
// PERFORMANCE NOTICE: This currently performs a full document fetch...
func (b *Bucket) ListSize(key string) (uint, Cas, error) {
var listContents []interface{}
cas, err := b.Get(key, &listContents)
if err != nil {
return 0, 0, err
}
return uint(len(listContents)), cas, nil
}
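
A brief sketch of the map and list helpers, assuming an open *gocb.Bucket and illustrative document IDs; note that MapAdd and ListAppend can create the backing document on first use when the create flag is true:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func mapAndListExample(b *gocb.Bucket) {
	// MapAdd inserts a key into a map document, creating the document if needed.
	if _, err := b.MapAdd("settings::ui", "theme", "dark", true); err != nil {
		log.Fatal(err)
	}
	var theme string
	if _, err := b.MapGet("settings::ui", "theme", &theme); err != nil {
		log.Fatal(err)
	}

	// ListAppend pushes onto the end of a list document, creating it if needed.
	if _, err := b.ListAppend("events::recent", "login", true); err != nil {
		log.Fatal(err)
	}
	size, _, err := b.ListSize("events::recent")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("theme:", theme, "events:", size)
}
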
// SetAdd adds a new value to a set document.
func (b *Bucket) SetAdd(key string, value interface{}, createSet bool) (Cas, error) {
for {
frag, err := b.MutateIn(key, 0, 0).ArrayAddUnique("", value, false).Execute()
if err != nil {
if IsKeyNotFoundError(err) && createSet {
var data []interface{}
data = append(data, value)
cas, err := b.Insert(key, data, 0)
if err != nil {
if IsKeyExistsError(err) {
continue
}
return 0, err
}
return cas, nil
}
return 0, err
}
return frag.Cas(), nil
}
}
// SetExists checks if a particular value exists within the specified set document.
// PERFORMANCE WARNING: This performs a full set fetch and compare.
func (b *Bucket) SetExists(key string, value interface{}) (bool, Cas, error) {
var setContents []interface{}
cas, err := b.Get(key, &setContents)
if err != nil {
return false, 0, err
}
for _, item := range setContents {
if item == value {
return true, cas, nil
}
}
return false, 0, nil
}
// SetSize returns the current number of values in a set.
// PERFORMANCE NOTICE: This currently performs a full document fetch...
func (b *Bucket) SetSize(key string) (uint, Cas, error) {
var setContents []interface{}
cas, err := b.Get(key, &setContents)
if err != nil {
return 0, 0, err
}
return uint(len(setContents)), cas, nil
}
// SetRemove removes a single specified value from the specified set document.
// WARNING: This relies on Go's interface{} comparison behaviour!
// PERFORMANCE WARNING: This performs full set fetch, modify, store cycles.
func (b *Bucket) SetRemove(key string, value interface{}) (Cas, error) {
for {
var setContents []interface{}
cas, err := b.Get(key, &setContents)
if err != nil {
return 0, err
}
foundItem := false
newSetContents := make([]interface{}, 0)
for _, item := range setContents {
if item == value {
foundItem = true
} else {
newSetContents = append(newSetContents, item)
}
}
if !foundItem {
return 0, ErrRangeError
}
cas, err = b.Replace(key, newSetContents, cas, 0)
if err != nil {
if IsKeyExistsError(err) {
// If this is just a CAS error, try again!
continue
}
return 0, err
}
return cas, nil
}
}
// QueuePush adds a new item to the end of a queue.
func (b *Bucket) QueuePush(key string, value interface{}, createQueue bool) (Cas, error) {
return b.ListPrepend(key, value, createQueue)
}
// QueuePop pops the oldest item from a queue and returns it.
func (b *Bucket) QueuePop(key string, valuePtr interface{}) (Cas, error) {
for {
getFrag, err := b.LookupIn(key).Get("[-1]").Execute()
if err != nil {
return 0, err
}
rmFrag, err := b.MutateIn(key, getFrag.Cas(), 0).Remove("[-1]").Execute()
if err != nil {
if IsKeyExistsError(err) {
// If this is just a CAS error, try again!
continue
}
return 0, err
}
err = getFrag.ContentByIndex(0, valuePtr)
if err != nil {
return 0, err
}
return rmFrag.Cas(), nil
}
}
// QueueSize returns the current size of a queue.
func (b *Bucket) QueueSize(key string) (uint, Cas, error) {
var queueContents []interface{}
cas, err := b.Get(key, &queueContents)
if err != nil {
return 0, 0, err
}
return uint(len(queueContents)), cas, nil
}
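
The set and queue helpers follow the same pattern; since QueuePush is implemented as ListPrepend, QueuePop's lookup of "[-1]" always removes the oldest entry, retrying on CAS contention. A hedged sketch with illustrative IDs:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func setAndQueueExample(b *gocb.Bucket) {
	// SetAdd stores the value only if it is not already present in the set.
	if _, err := b.SetAdd("tags::doc1", "urgent", true); err != nil {
		log.Fatal(err)
	}
	exists, _, err := b.SetExists("tags::doc1", "urgent")
	if err != nil {
		log.Fatal(err)
	}

	// QueuePush enqueues at the head; QueuePop removes from the tail.
	if _, err := b.QueuePush("jobs::pending", "send-email", true); err != nil {
		log.Fatal(err)
	}
	var job string
	if _, err := b.QueuePop("jobs::pending", &job); err != nil {
		log.Fatal(err)
	}
	fmt.Println("tag present:", exists, "next job:", job)
}
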

299
vendor/gopkg.in/couchbase/gocb.v1/bucket_dura.go generated vendored Normal file

@ -0,0 +1,299 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
)
func (b *Bucket) observeOnceCas(tracectx opentracing.SpanContext, key []byte, cas Cas, forDelete bool, replicaIdx int, commCh chan uint) (pendingOp, error) {
return b.client.ObserveEx(gocbcore.ObserveOptions{
Key: key,
ReplicaIdx: replicaIdx,
TraceContext: tracectx,
}, func(res *gocbcore.ObserveResult, err error) {
if err != nil || res == nil {
commCh <- 0
return
}
didReplicate := false
didPersist := false
if res.KeyState == gocbcore.KeyStatePersisted {
if !forDelete {
if Cas(res.Cas) == cas {
if replicaIdx != 0 {
didReplicate = true
}
didPersist = true
}
}
} else if res.KeyState == gocbcore.KeyStateNotPersisted {
if !forDelete {
if Cas(res.Cas) == cas {
if replicaIdx != 0 {
didReplicate = true
}
}
}
} else if res.KeyState == gocbcore.KeyStateDeleted {
if forDelete {
didReplicate = true
}
} else {
if forDelete {
didReplicate = true
didPersist = true
}
}
var out uint
if didReplicate {
out |= 1
}
if didPersist {
out |= 2
}
commCh <- out
})
}
func (b *Bucket) observeOnceSeqNo(tracectx opentracing.SpanContext, mt MutationToken, replicaIdx int, commCh chan uint) (pendingOp, error) {
return b.client.ObserveVbEx(gocbcore.ObserveVbOptions{
VbId: mt.token.VbId,
VbUuid: mt.token.VbUuid,
ReplicaIdx: replicaIdx,
TraceContext: tracectx,
}, func(res *gocbcore.ObserveVbResult, err error) {
if err != nil || res == nil {
commCh <- 0
return
}
didReplicate := res.CurrentSeqNo >= mt.token.SeqNo
didPersist := res.PersistSeqNo >= mt.token.SeqNo
var out uint
if didReplicate {
out |= 1
}
if didPersist {
out |= 2
}
commCh <- out
})
}
func (b *Bucket) observeOne(tracectx opentracing.SpanContext, key []byte, mt MutationToken, cas Cas, forDelete bool, replicaIdx int, replicaCh, persistCh chan bool) {
observeOnce := func(commCh chan uint) (pendingOp, error) {
if mt.token.VbUuid != 0 && mt.token.SeqNo != 0 {
return b.observeOnceSeqNo(tracectx, mt, replicaIdx, commCh)
}
return b.observeOnceCas(tracectx, key, cas, forDelete, replicaIdx, commCh)
}
sentReplicated := false
sentPersisted := false
failMe := func() {
if !sentReplicated {
replicaCh <- false
sentReplicated = true
}
if !sentPersisted {
persistCh <- false
sentPersisted = true
}
}
timeoutTmr := gocbcore.AcquireTimer(b.duraTimeout)
commCh := make(chan uint)
for {
op, err := observeOnce(commCh)
if err != nil {
gocbcore.ReleaseTimer(timeoutTmr, false)
failMe()
return
}
select {
case val := <-commCh:
// Got Value
if (val&1) != 0 && !sentReplicated {
replicaCh <- true
sentReplicated = true
}
if (val&2) != 0 && !sentPersisted {
persistCh <- true
sentPersisted = true
}
if sentReplicated && sentPersisted {
return
}
waitTmr := gocbcore.AcquireTimer(b.duraPollTimeout)
select {
case <-waitTmr.C:
gocbcore.ReleaseTimer(waitTmr, true)
// Fall through to outside for loop
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(waitTmr, false)
gocbcore.ReleaseTimer(timeoutTmr, true)
failMe()
return
}
case <-timeoutTmr.C:
// Timed out
op.Cancel()
gocbcore.ReleaseTimer(timeoutTmr, true)
failMe()
return
}
}
}
func (b *Bucket) durability(tracectx opentracing.SpanContext, key string, cas Cas, mt MutationToken, replicaTo, persistTo uint, forDelete bool) error {
numServers := b.client.NumReplicas() + 1
if replicaTo > uint(numServers-1) || persistTo > uint(numServers) {
return ErrNotEnoughReplicas
}
keyBytes := []byte(key)
replicaCh := make(chan bool, numServers)
persistCh := make(chan bool, numServers)
for replicaIdx := 0; replicaIdx < numServers; replicaIdx++ {
go b.observeOne(tracectx, keyBytes, mt, cas, forDelete, replicaIdx, replicaCh, persistCh)
}
results := int(0)
replicas := uint(0)
persists := uint(0)
for {
select {
case rV := <-replicaCh:
if rV {
replicas++
}
results++
case pV := <-persistCh:
if pV {
persists++
}
results++
}
if replicas >= replicaTo && persists >= persistTo {
return nil
} else if results == (numServers * 2) {
return ErrDurabilityTimeout
}
}
}
// TouchDura touches a document, specifying a new expiry time for it. Additionally checks document durability.
// The Cas value must be 0.
func (b *Bucket) TouchDura(key string, cas Cas, expiry uint32, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("TouchDura")
defer span.Finish()
if cas != 0 {
return 0, ErrNonZeroCas
}
cas, mt, err := b.touch(span.Context(), key, expiry)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// RemoveDura removes a document from the bucket. Additionally checks document durability.
func (b *Bucket) RemoveDura(key string, cas Cas, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("RemoveDura")
defer span.Finish()
cas, mt, err := b.remove(span.Context(), key, cas)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, true)
}
// UpsertDura inserts or replaces a document in the bucket. Additionally checks document durability.
func (b *Bucket) UpsertDura(key string, value interface{}, expiry uint32, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("UpsertDura")
defer span.Finish()
cas, mt, err := b.upsert(span.Context(), key, value, expiry)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// InsertDura inserts a new document to the bucket. Additionally checks document durability.
func (b *Bucket) InsertDura(key string, value interface{}, expiry uint32, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("InsertDura")
defer span.Finish()
cas, mt, err := b.insert(span.Context(), key, value, expiry)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// ReplaceDura replaces a document in the bucket. Additionally checks document durability.
func (b *Bucket) ReplaceDura(key string, value interface{}, cas Cas, expiry uint32, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("ReplaceDura")
defer span.Finish()
cas, mt, err := b.replace(span.Context(), key, value, cas, expiry)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// AppendDura appends a string value to a document. Additionally checks document durability.
func (b *Bucket) AppendDura(key, value string, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("AppendDura")
defer span.Finish()
cas, mt, err := b.append(span.Context(), key, value)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// PrependDura prepends a string value to a document. Additionally checks document durability.
func (b *Bucket) PrependDura(key, value string, replicateTo, persistTo uint) (Cas, error) {
span := b.startKvOpTrace("PrependDura")
defer span.Finish()
cas, mt, err := b.prepend(span.Context(), key, value)
if err != nil {
return cas, err
}
return cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
// CounterDura performs an atomic addition or subtraction for an integer document. Additionally checks document durability.
func (b *Bucket) CounterDura(key string, delta, initial int64, expiry uint32, replicateTo, persistTo uint) (uint64, Cas, error) {
span := b.startKvOpTrace("CounterDura")
defer span.Finish()
val, cas, mt, err := b.counter(span.Context(), key, delta, initial, expiry)
if err != nil {
return val, cas, err
}
return val, cas, b.durability(span.Context(), key, cas, mt, replicateTo, persistTo, false)
}
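
The *Dura variants wrap the plain mutations with the observe-based durability check implemented earlier in this file; replicateTo and persistTo are validated against the bucket's configured replica count. A usage sketch, assuming a bucket with at least one replica and an illustrative key:

package example

import (
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func durableWrite(b *gocb.Bucket) {
	// Block until the write has reached one replica and been persisted on one node.
	_, err := b.UpsertDura("order::42", map[string]interface{}{"status": "paid"}, 0, 1, 1)
	if err == gocb.ErrDurabilityTimeout {
		// The mutation itself succeeded; only the durability requirement timed out.
		log.Println("write stored, but durability not confirmed in time")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
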

119
vendor/gopkg.in/couchbase/gocb.v1/bucket_internal.go generated vendored Normal file

@ -0,0 +1,119 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
)
// BucketInternal holds various internally used bucket extension methods.
//
// Internal: This should never be used and is not supported.
type BucketInternal struct {
b *Bucket
}
// GetRandom retrieves a document from the bucket
func (bi *BucketInternal) GetRandom(valuePtr interface{}) (string, Cas, error) {
span := bi.b.startKvOpTrace("GetRandom")
defer span.Finish()
return bi.b.getRandom(span.Context(), valuePtr)
}
// UpsertMeta inserts or replaces (with metadata) a document in a bucket.
func (bi *BucketInternal) UpsertMeta(key string, value, extra []byte, datatype uint8,
options, flags, expiry uint32, cas, revseqno uint64) (Cas, error) {
span := bi.b.startKvOpTrace("UpsertMeta")
defer span.Finish()
outcas, _, err := bi.b.upsertMeta(span.Context(), key, value, extra, datatype, options,
flags, expiry, cas, revseqno)
return outcas, err
}
// RemoveMeta removes a document (with metadata) from the bucket.
func (bi *BucketInternal) RemoveMeta(key string, value, extra []byte, datatype uint8,
options, flags, expiry uint32, cas, revseqno uint64) (Cas, error) {
span := bi.b.startKvOpTrace("RemoveMeta")
defer span.Finish()
outcas, _, err := bi.b.removeMeta(span.Context(), key, value, extra, datatype, options,
flags, expiry, cas, revseqno)
return outcas, err
}
func (b *Bucket) getRandom(tracectx opentracing.SpanContext,
valuePtr interface{}) (keyOut string, casOut Cas, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.GetRandomEx(gocbcore.GetRandomOptions{
TraceContext: tracectx,
}, func(res *gocbcore.GetRandomResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
keyOut = string(res.Key)
if err == nil {
err = ctrl.Decode(res.Value, res.Flags, valuePtr)
}
}
ctrl.Resolve(err)
}))
if err != nil {
return "", 0, err
}
return
}
func (b *Bucket) upsertMeta(tracectx opentracing.SpanContext, key string, value, extra []byte, datatype uint8,
options, flags uint32, expiry uint32, cas, revseqno uint64) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.SetMetaEx(gocbcore.SetMetaOptions{
Key: []byte(key),
Value: value,
Extra: extra,
Datatype: datatype,
Options: options,
Flags: flags,
Expiry: expiry,
Cas: gocbcore.Cas(cas),
RevNo: revseqno,
TraceContext: tracectx,
}, func(res *gocbcore.SetMetaResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}
func (b *Bucket) removeMeta(tracectx opentracing.SpanContext, key string, value, extra []byte, datatype uint8,
options, flags uint32, expiry uint32, cas, revseqno uint64) (casOut Cas, mtOut MutationToken, errOut error) {
ctrl := b.newOpManager(tracectx)
err := ctrl.Wait(b.client.DeleteMetaEx(gocbcore.DeleteMetaOptions{
Key: []byte(key),
Value: value,
Extra: extra,
Datatype: datatype,
Options: options,
Flags: flags,
Expiry: expiry,
Cas: gocbcore.Cas(cas),
RevNo: revseqno,
TraceContext: tracectx,
}, func(res *gocbcore.DeleteMetaResult, err error) {
if res != nil {
casOut = Cas(res.Cas)
mtOut = MutationToken{res.MutationToken, b}
}
ctrl.Resolve(err)
}))
if err != nil {
return 0, MutationToken{}, err
}
return
}

430
vendor/gopkg.in/couchbase/gocb.v1/bucket_multi.go generated vendored Normal file

@ -0,0 +1,430 @@
package gocb
import (
"gopkg.in/couchbase/gocbcore.v7"
)
type bulkOp struct {
pendop gocbcore.PendingOp
}
func (op *bulkOp) cancel() bool {
if op.pendop == nil {
return true
}
res := op.pendop.Cancel()
op.pendop = nil
return res
}
// BulkOp represents a single operation that can be submitted (within a list of more operations) to .Do()
// You can create a bulk operation by instantiating one of the implementations of BulkOp,
// such as GetOp, UpsertOp, ReplaceOp, and more.
type BulkOp interface {
execute(*Bucket, chan BulkOp)
markError(err error)
cancel() bool
}
// Do executes one or more `BulkOp` items in parallel.
func (b *Bucket) Do(ops []BulkOp) error {
timeoutTmr := gocbcore.AcquireTimer(b.bulkOpTimeout)
// Make the channel big enough to hold all our ops in case
// we get delayed inside execute (don't want to block the
// individual op handlers when they dispatch their signal).
signal := make(chan BulkOp, len(ops))
for _, item := range ops {
item.execute(b, signal)
}
for range ops {
select {
case item := <-signal:
// We're really just clearing the pendop from this thread,
// since it already completed, no cancel actually occurs
item.cancel()
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(timeoutTmr, true)
for _, item := range ops {
if !item.cancel() {
<-signal
continue
}
// We use this method to mark the individual items as
// having timed out so we don't move `Err` in bulkOp
// and break backwards compatibility.
item.markError(ErrTimeout)
}
return ErrTimeout
}
}
gocbcore.ReleaseTimer(timeoutTmr, false)
return nil
}
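
Do is the entry point for the bulk API: every BulkOp dispatches itself, then Do drains the shared signal channel and, on a deadline overrun, marks the unfinished items with ErrTimeout. A sketch of mixed bulk operations against an open *gocb.Bucket with illustrative keys:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func bulkExample(b *gocb.Bucket) {
	var fetched interface{}
	ops := []gocb.BulkOp{
		&gocb.UpsertOp{Key: "bulk::1", Value: map[string]string{"kind": "demo"}},
		&gocb.GetOp{Key: "bulk::2", Value: &fetched},
		&gocb.RemoveOp{Key: "bulk::3"},
	}

	// Do runs the operations in parallel; per-item results land on the ops themselves.
	if err := b.Do(ops); err != nil {
		log.Fatal(err)
	}
	for _, op := range ops {
		switch o := op.(type) {
		case *gocb.UpsertOp:
			fmt.Println("upsert", o.Key, "cas:", o.Cas, "err:", o.Err)
		case *gocb.GetOp:
			fmt.Println("get", o.Key, "err:", o.Err)
		case *gocb.RemoveOp:
			fmt.Println("remove", o.Key, "err:", o.Err)
		}
	}
}
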
// GetOp represents a type of `BulkOp` used for Get operations. See BulkOp.
type GetOp struct {
bulkOp
Key string
Value interface{}
Cas Cas
Err error
}
func (item *GetOp) markError(err error) {
item.Err = err
}
func (item *GetOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.Get([]byte(item.Key), func(bytes []byte, flags uint32, cas gocbcore.Cas, err error) {
item.Err = err
if item.Err == nil {
item.Err = b.transcoder.Decode(bytes, flags, item.Value)
if item.Err == nil {
item.Cas = Cas(cas)
}
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// GetAndTouchOp represents a type of `BulkOp` used for GetAndTouch operations. See BulkOp.
type GetAndTouchOp struct {
bulkOp
Key string
Value interface{}
Expiry uint32
Cas Cas
Err error
}
func (item *GetAndTouchOp) markError(err error) {
item.Err = err
}
func (item *GetAndTouchOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.GetAndTouch([]byte(item.Key), item.Expiry,
func(bytes []byte, flags uint32, cas gocbcore.Cas, err error) {
item.Err = err
if item.Err == nil {
item.Err = b.transcoder.Decode(bytes, flags, item.Value)
if item.Err == nil {
item.Cas = Cas(cas)
}
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// TouchOp represents a type of `BulkOp` used for Touch operations. See BulkOp.
type TouchOp struct {
bulkOp
Key string
Expiry uint32
Cas Cas
Err error
}
func (item *TouchOp) markError(err error) {
item.Err = err
}
func (item *TouchOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.Touch([]byte(item.Key), gocbcore.Cas(item.Cas), item.Expiry,
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// RemoveOp represents a type of `BulkOp` used for Remove operations. See BulkOp.
type RemoveOp struct {
bulkOp
Key string
Cas Cas
Err error
}
func (item *RemoveOp) markError(err error) {
item.Err = err
}
func (item *RemoveOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.Remove([]byte(item.Key), gocbcore.Cas(item.Cas),
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// UpsertOp represents a type of `BulkOp` used for Upsert operations. See BulkOp.
type UpsertOp struct {
bulkOp
Key string
Value interface{}
Expiry uint32
Cas Cas
Err error
}
func (item *UpsertOp) markError(err error) {
item.Err = err
}
func (item *UpsertOp) execute(b *Bucket, signal chan BulkOp) {
bytes, flags, err := b.transcoder.Encode(item.Value)
if err != nil {
item.Err = err
signal <- item
} else {
op, err := b.client.Set([]byte(item.Key), bytes, flags, item.Expiry,
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
}
// InsertOp represents a type of `BulkOp` used for Insert operations. See BulkOp.
type InsertOp struct {
bulkOp
Key string
Value interface{}
Expiry uint32
Cas Cas
Err error
}
func (item *InsertOp) markError(err error) {
item.Err = err
}
func (item *InsertOp) execute(b *Bucket, signal chan BulkOp) {
bytes, flags, err := b.transcoder.Encode(item.Value)
if err != nil {
item.Err = err
signal <- item
} else {
op, err := b.client.Add([]byte(item.Key), bytes, flags, item.Expiry,
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
}
// ReplaceOp represents a type of `BulkOp` used for Replace operations. See BulkOp.
type ReplaceOp struct {
bulkOp
Key string
Value interface{}
Expiry uint32
Cas Cas
Err error
}
func (item *ReplaceOp) markError(err error) {
item.Err = err
}
func (item *ReplaceOp) execute(b *Bucket, signal chan BulkOp) {
bytes, flags, err := b.transcoder.Encode(item.Value)
if err != nil {
item.Err = err
signal <- item
} else {
op, err := b.client.Replace([]byte(item.Key), bytes, flags, gocbcore.Cas(item.Cas), item.Expiry,
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
}
// AppendOp represents a type of `BulkOp` used for Append operations. See BulkOp.
type AppendOp struct {
bulkOp
Key string
Value string
Cas Cas
Err error
}
func (item *AppendOp) markError(err error) {
item.Err = err
}
func (item *AppendOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.Append([]byte(item.Key), []byte(item.Value),
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// PrependOp represents a type of `BulkOp` used for Prepend operations. See BulkOp.
type PrependOp struct {
bulkOp
Key string
Value string
Cas Cas
Err error
}
func (item *PrependOp) markError(err error) {
item.Err = err
}
func (item *PrependOp) execute(b *Bucket, signal chan BulkOp) {
op, err := b.client.Prepend([]byte(item.Key), []byte(item.Value),
func(cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
}
// CounterOp represents a type of `BulkOp` used for Counter operations. See BulkOp.
type CounterOp struct {
bulkOp
Key string
Delta int64
Initial int64
Expiry uint32
Cas Cas
Value uint64
Err error
}
func (item *CounterOp) markError(err error) {
item.Err = err
}
func (item *CounterOp) execute(b *Bucket, signal chan BulkOp) {
realInitial := uint64(0xFFFFFFFFFFFFFFFF)
if item.Initial > 0 {
realInitial = uint64(item.Initial)
}
if item.Delta > 0 {
op, err := b.client.Increment([]byte(item.Key), uint64(item.Delta), realInitial, item.Expiry,
func(value uint64, cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Value = value
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
} else if item.Delta < 0 {
op, err := b.client.Decrement([]byte(item.Key), uint64(-item.Delta), realInitial, item.Expiry,
func(value uint64, cas gocbcore.Cas, mutToken gocbcore.MutationToken, err error) {
item.Err = err
if item.Err == nil {
item.Value = value
item.Cas = Cas(cas)
}
signal <- item
})
if err != nil {
item.Err = err
signal <- item
} else {
item.bulkOp.pendop = op
}
} else {
item.Err = clientError{"Delta must be a non-zero value."}
signal <- item
}
}

14
vendor/gopkg.in/couchbase/gocb.v1/bucket_n1qlquery.go generated vendored Normal file

@ -0,0 +1,14 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
)
// ExecuteN1qlQuery performs a n1ql query and returns a list of rows or an error.
func (b *Bucket) ExecuteN1qlQuery(q *N1qlQuery, params interface{}) (QueryResults, error) {
span := b.tracer.StartSpan("ExecuteSearchQuery",
opentracing.Tag{Key: "couchbase.service", Value: "n1ql"})
defer span.Finish()
return b.cluster.doN1qlQuery(span.Context(), b, q, params)
}
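
A minimal sketch of running a N1QL query through this wrapper, assuming the query service is available; the statement, bucket name, and positional parameter are illustrative:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func n1qlExample(b *gocb.Bucket) {
	query := gocb.NewN1qlQuery("SELECT name FROM `travel-sample` WHERE type = $1 LIMIT 5")
	rows, err := b.ExecuteN1qlQuery(query, []interface{}{"airline"})
	if err != nil {
		log.Fatal(err)
	}

	var row struct {
		Name string `json:"name"`
	}
	for rows.Next(&row) {
		fmt.Println(row.Name)
	}
	if err := rows.Close(); err != nil {
		log.Fatal(err)
	}
}
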

312
vendor/gopkg.in/couchbase/gocb.v1/bucket_ping.go generated vendored Normal file

@ -0,0 +1,312 @@
package gocb
import (
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/google/uuid"
"gopkg.in/couchbase/gocbcore.v7"
)
func diagServiceString(service ServiceType) string {
switch service {
case MemdService:
return "kv"
case CapiService:
return "view"
case MgmtService:
return "mgmt"
case N1qlService:
return "n1ql"
case FtsService:
return "fts"
case CbasService:
return "cbas"
}
return "?"
}
// PingServiceEntry represents a single entry in a ping report.
type PingServiceEntry struct {
Service ServiceType
Endpoint string
Success bool
Latency time.Duration
}
// PingReport encapsulates the details from an executed ping operation.
type PingReport struct {
Services []PingServiceEntry
}
type jsonPingServiceEntry struct {
Remote string `json:"remote"`
LatencyUs uint64 `json:"latency_us"`
Success bool `json:"success"`
}
type jsonPingReport struct {
Version int `json:"version"`
Id string `json:"id"`
Sdk string `json:"sdk"`
Services map[string][]jsonPingServiceEntry `json:"services"`
}
// MarshalJSON generates a JSON representation of this ping report.
func (report *PingReport) MarshalJSON() ([]byte, error) {
jsonReport := jsonPingReport{
Version: 1,
Id: uuid.New().String(),
Sdk: "gocb/" + Version() + " " + "gocbcore/" + gocbcore.Version(),
Services: make(map[string][]jsonPingServiceEntry),
}
for _, service := range report.Services {
serviceStr := diagServiceString(service.Service)
jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonPingServiceEntry{
Remote: service.Endpoint,
LatencyUs: uint64(service.Latency / time.Nanosecond),
})
}
return json.Marshal(&jsonReport)
}
func (b *Bucket) pingKv() (pingsOut []gocbcore.PingResult, errOut error) {
signal := make(chan bool, 1)
op, err := b.client.Ping(func(results []gocbcore.PingResult) {
pingsOut = make([]gocbcore.PingResult, len(results))
for pingIdx, ping := range results {
// We rewrite the cancelled errors into timeout errors here.
if ping.Error == gocbcore.ErrCancelled {
ping.Error = ErrTimeout
}
pingsOut[pingIdx] = ping
}
signal <- true
})
if err != nil {
return nil, err
}
timeoutTmr := gocbcore.AcquireTimer(b.opTimeout)
select {
case <-signal:
gocbcore.ReleaseTimer(timeoutTmr, false)
return
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(timeoutTmr, true)
if !op.Cancel() {
<-signal
return
}
return nil, ErrTimeout
}
}
// Ping will ping a list of services and verify they are active and
// responding in an acceptable period of time.
//
// Experimental: This API is subject to change at any time.
func (b *Bucket) Ping(services []ServiceType) (*PingReport, error) {
numServices := 0
waitCh := make(chan error, 10)
report := &PingReport{}
var reportLock sync.Mutex
if services == nil {
services = []ServiceType{
MemdService,
}
if b.client.N1qlEps() != nil {
services = append(services, N1qlService)
}
if b.client.FtsEps() != nil {
services = append(services, FtsService)
}
if b.client.CbasEps() != nil {
services = append(services, CbasService)
}
}
httpReq := func(service ServiceType, endpoint, url string) (time.Duration, error) {
c := b.cluster
startTime := time.Now()
client := b.client.HttpClient()
reqUri := fmt.Sprintf("%s/%s", endpoint, url)
req, err := http.NewRequest("GET", reqUri, nil)
if err != nil {
return 0, err
}
timeout := 60 * time.Second
if service == N1qlService {
if b.n1qlTimeout < c.n1qlTimeout {
timeout = b.n1qlTimeout
} else {
timeout = c.n1qlTimeout
}
} else if service == FtsService {
if b.ftsTimeout < c.ftsTimeout {
timeout = b.ftsTimeout
} else {
timeout = c.ftsTimeout
}
} else if service == CbasService {
timeout = c.analyticsTimeout
}
resp, err := doHttpWithTimeout(client, req, timeout)
if err != nil {
return 0, err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close http request: %s", err)
}
pingLatency := time.Now().Sub(startTime)
return pingLatency, err
}
for _, serviceType := range services {
switch serviceType {
case MemdService:
numServices++
go func() {
pings, err := b.pingKv()
if err != nil {
logWarnf("Failed to ping KV for report: %s", err)
waitCh <- nil
return
}
reportLock.Lock()
// We intentionally ignore errors here and simply include
// any non-error pings that we have received. Note that
// gocbcore's ping command, when cancelled, still returns
// any pings that had occurred before the operation was
// cancelled and then marks the rest as errors.
for _, ping := range pings {
wasSuccess := true
if ping.Error != nil {
wasSuccess = false
}
report.Services = append(report.Services, PingServiceEntry{
Service: MemdService,
Endpoint: ping.Endpoint,
Success: wasSuccess,
Latency: ping.Latency,
})
}
reportLock.Unlock()
waitCh <- nil
}()
case CapiService:
// View Service is not currently supported as a ping target
case N1qlService:
numServices++
go func() {
pingLatency := time.Duration(0)
endpoint, err := b.getN1qlEp()
if err == nil {
pingLatency, err = httpReq(N1qlService, endpoint, "/admin/ping")
}
reportLock.Lock()
if err != nil {
report.Services = append(report.Services, PingServiceEntry{
Service: N1qlService,
Endpoint: endpoint,
Success: false,
})
} else {
report.Services = append(report.Services, PingServiceEntry{
Service: N1qlService,
Endpoint: endpoint,
Success: true,
Latency: pingLatency,
})
}
reportLock.Unlock()
waitCh <- nil
}()
case FtsService:
numServices++
go func() {
pingLatency := time.Duration(0)
endpoint, err := b.getFtsEp()
if err == nil {
pingLatency, err = httpReq(FtsService, endpoint, "/api/ping")
}
reportLock.Lock()
if err != nil {
report.Services = append(report.Services, PingServiceEntry{
Service: FtsService,
Endpoint: endpoint,
Success: false,
})
} else {
report.Services = append(report.Services, PingServiceEntry{
Service: FtsService,
Endpoint: endpoint,
Success: true,
Latency: pingLatency,
})
}
reportLock.Unlock()
waitCh <- nil
}()
case CbasService:
numServices++
go func() {
pingLatency := time.Duration(0)
endpoint, err := b.getCbasEp()
if err == nil {
pingLatency, err = httpReq(CbasService, endpoint, "/admin/ping")
}
reportLock.Lock()
if err != nil {
report.Services = append(report.Services, PingServiceEntry{
Service: CbasService,
Endpoint: endpoint,
Success: false,
})
} else {
report.Services = append(report.Services, PingServiceEntry{
Service: CbasService,
Endpoint: endpoint,
Success: true,
Latency: pingLatency,
})
}
reportLock.Unlock()
waitCh <- nil
}()
}
}
for i := 0; i < numServices; i++ {
<-waitCh
}
return report, nil
}
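
A sketch of invoking Ping and reading the per-service results, assuming an open *gocb.Bucket; passing nil lets the method above probe KV plus whichever query services the cluster advertises:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func pingExample(b *gocb.Bucket) {
	report, err := b.Ping(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range report.Services {
		fmt.Printf("service=%v endpoint=%s success=%t latency=%v\n",
			svc.Service, svc.Endpoint, svc.Success, svc.Latency)
	}
}
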

15
vendor/gopkg.in/couchbase/gocb.v1/bucket_searchquery.go generated vendored Normal file

@ -0,0 +1,15 @@
package gocb
import (
"github.com/opentracing/opentracing-go"
)
// ExecuteSearchQuery performs a full-text search (FTS) query and returns a list of rows or an error.
func (b *Bucket) ExecuteSearchQuery(q *SearchQuery) (SearchResults, error) {
span := b.tracer.StartSpan("ExecuteSearchQuery",
opentracing.Tag{Key: "couchbase.service", Value: "fts"})
span.SetTag("bucket_name", b.name)
defer span.Finish()
return b.cluster.doSearchQuery(span.Context(), b, q)
}

662
vendor/gopkg.in/couchbase/gocb.v1/bucket_subdoc.go generated vendored Normal file

@ -0,0 +1,662 @@
package gocb
import (
"encoding/json"
"log"
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
)
type subDocResult struct {
path string
data []byte
err error
}
// DocumentFragment represents multiple chunks of a full Document.
type DocumentFragment struct {
cas Cas
mt MutationToken
contents []subDocResult
pathMap map[string]int
}
// Cas returns the Cas of the Document
func (frag *DocumentFragment) Cas() Cas {
return frag.cas
}
// MutationToken returns the MutationToken for the change represented by this DocumentFragment.
func (frag *DocumentFragment) MutationToken() MutationToken {
return frag.mt
}
// ContentByIndex retrieves the value of the operation by its index. The index is the position of
// the operation as it was added to the builder.
func (frag *DocumentFragment) ContentByIndex(idx int, valuePtr interface{}) error {
res := frag.contents[idx]
if res.err != nil {
return res.err
}
if valuePtr == nil {
return nil
}
if valuePtr, ok := valuePtr.(*[]byte); ok {
*valuePtr = res.data
return nil
}
return json.Unmarshal(res.data, valuePtr)
}
// Content retrieves the value of the operation by its path. The path is the path provided
// to the operation
func (frag *DocumentFragment) Content(path string, valuePtr interface{}) error {
if frag.pathMap == nil {
frag.pathMap = make(map[string]int)
for i, v := range frag.contents {
frag.pathMap[v.path] = i
}
}
return frag.ContentByIndex(frag.pathMap[path], valuePtr)
}
// Exists checks whether the indicated path exists in this DocumentFragment and no
// errors were returned from the server.
func (frag *DocumentFragment) Exists(path string) bool {
err := frag.Content(path, nil)
return err == nil
}
// LookupInBuilder is a builder used to create a set of sub-document lookup operations.
type LookupInBuilder struct {
bucket *Bucket
opName string
name string
flags gocbcore.SubdocDocFlag
ops []gocbcore.SubDocOp
}
func (set *LookupInBuilder) execute(tracectx opentracing.SpanContext) (*DocumentFragment, error) {
return set.bucket.lookupIn(tracectx, set)
}
// Execute executes this set of lookup operations on the bucket.
func (set *LookupInBuilder) Execute() (*DocumentFragment, error) {
return set.execute(nil)
}
// GetEx allows you to perform a sub-document Get operation with flags
func (set *LookupInBuilder) GetEx(path string, flags SubdocFlag) *LookupInBuilder {
if path == "" {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpGetDoc,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpGet,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
// Get indicates a path to be retrieved from the document. The value of the path
// can later be retrieved (after .Execute()) using the Content or ContentByIndex
// method. The path syntax follows N1QL's path syntax (e.g. `foo.bar.baz`).
func (set *LookupInBuilder) Get(path string) *LookupInBuilder {
return set.GetEx(path, SubdocFlagNone)
}
// ExistsEx allows you to perform a sub-document Exists operation with flags
func (set *LookupInBuilder) ExistsEx(path string, flags SubdocFlag) *LookupInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpExists,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
// Exists is similar to Get(), but does not actually retrieve the value from the server.
// This may save bandwidth if you only need to check for the existence of a
// path (without caring for its content). You can check the status of this
// operation by using .Content (and ignoring the value) or .Exists()
func (set *LookupInBuilder) Exists(path string) *LookupInBuilder {
return set.ExistsEx(path, SubdocFlagNone)
}
// GetCountEx allows you to perform a sub-document GetCount operation with flags
func (set *LookupInBuilder) GetCountEx(path string, flags SubdocFlag) *LookupInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpGetCount,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
// GetCount allows you to retrieve the number of items in an array or keys within a
// dictionary within an element of a document.
func (set *LookupInBuilder) GetCount(path string) *LookupInBuilder {
return set.GetCountEx(path, SubdocFlagNone)
}
func (b *Bucket) lookupIn(tracectx opentracing.SpanContext, set *LookupInBuilder) (resOut *DocumentFragment, errOut error) {
if tracectx == nil {
lispan := b.startKvOpTrace(set.opName)
defer lispan.Finish()
tracectx = lispan.Context()
}
signal := make(chan bool, 1)
op, err := b.client.LookupInEx(gocbcore.LookupInOptions{
Key: []byte(set.name),
Flags: set.flags,
Ops: set.ops,
TraceContext: tracectx,
}, func(res *gocbcore.LookupInResult, err error) {
errOut = err
if res != nil {
resSet := &DocumentFragment{}
resSet.contents = make([]subDocResult, len(res.Ops))
resSet.cas = Cas(res.Cas)
for i, opRes := range res.Ops {
resSet.contents[i].path = set.ops[i].Path
resSet.contents[i].err = opRes.Err
if opRes.Value != nil {
resSet.contents[i].data = append([]byte(nil), opRes.Value...)
}
}
resOut = resSet
}
signal <- true
})
if err != nil {
return nil, err
}
timeoutTmr := gocbcore.AcquireTimer(b.opTimeout)
select {
case <-signal:
gocbcore.ReleaseTimer(timeoutTmr, false)
return
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(timeoutTmr, true)
if !op.Cancel() {
<-signal
return
}
return nil, ErrTimeout
}
}
func (b *Bucket) startLookupIn(opName string, key string, flags SubdocDocFlag) *LookupInBuilder {
return &LookupInBuilder{
bucket: b,
name: key,
flags: gocbcore.SubdocDocFlag(flags),
opName: opName,
}
}
// LookupInEx creates a sub-document lookup operation builder.
func (b *Bucket) LookupInEx(key string, flags SubdocDocFlag) *LookupInBuilder {
return b.startLookupIn("LookupInEx", key, flags)
}
// LookupIn creates a sub-document lookup operation builder.
func (b *Bucket) LookupIn(key string) *LookupInBuilder {
return b.startLookupIn("LookupIn", key, 0)
}
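
A sketch of the lookup builder in use: chain the path operations, call Execute once, then pull values out of the returned DocumentFragment by path or by index. Document shape and key are illustrative:

package example

import (
	"fmt"
	"log"

	gocb "gopkg.in/couchbase/gocb.v1"
)

func lookupExample(b *gocb.Bucket) {
	frag, err := b.LookupIn("user::100").
		Get("name").
		GetCount("roles").
		Execute()
	if err != nil {
		log.Fatal(err)
	}

	var name string
	if err := frag.Content("name", &name); err != nil {
		log.Fatal(err)
	}
	var roleCount int
	if err := frag.ContentByIndex(1, &roleCount); err != nil {
		log.Fatal(err)
	}
	fmt.Println("name:", name, "roles:", roleCount)
}
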
// MutateInBuilder is a builder used to create a set of sub-document mutation operations.
type MutateInBuilder struct {
bucket *Bucket
opName string
name string
flags gocbcore.SubdocDocFlag
cas gocbcore.Cas
expiry uint32
ops []gocbcore.SubDocOp
errs MultiError
replicaTo uint
persistTo uint
}
func (set *MutateInBuilder) execute(tracectx opentracing.SpanContext) (*DocumentFragment, error) {
return set.bucket.mutateIn(tracectx, set)
}
// Execute executes this set of mutation operations on the bucket.
func (set *MutateInBuilder) Execute() (*DocumentFragment, error) {
return set.execute(nil)
}
func (set *MutateInBuilder) marshalValue(value interface{}) []byte {
if value, ok := value.([]byte); ok {
return value
}
if value, ok := value.(*[]byte); ok {
return *value
}
bytes, err := json.Marshal(value)
if err != nil {
set.errs.add(err)
return nil
}
return bytes
}
// InsertEx allows you to perform a sub-document Insert operation with flags
func (set *MutateInBuilder) InsertEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
if path == "" {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpAddDoc,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpDictAdd,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
// Insert adds an insert operation to this mutation operation set.
func (set *MutateInBuilder) Insert(path string, value interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.InsertEx(path, value, flags)
}
// UpsertEx allows you to perform a sub-document Upsert operation with flags
func (set *MutateInBuilder) UpsertEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
if path == "" {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpSetDoc,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpDictSet,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
// Upsert adds an upsert operation to this mutation operation set.
func (set *MutateInBuilder) Upsert(path string, value interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.UpsertEx(path, value, flags)
}
// ReplaceEx allows you to perform a sub-document Replace operation with flags
func (set *MutateInBuilder) ReplaceEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpReplace,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
// Replace adds a replace operation to this mutation operation set.
func (set *MutateInBuilder) Replace(path string, value interface{}) *MutateInBuilder {
return set.ReplaceEx(path, value, SubdocFlagNone)
}
func (set *MutateInBuilder) marshalArrayMulti(in interface{}) (out []byte) {
out, err := json.Marshal(in)
if err != nil {
log.Panic(err)
}
// Assert first character is a '['
if len(out) < 2 || out[0] != '[' {
log.Panic("Not a JSON array")
}
out = out[1 : len(out)-1]
return
}
// RemoveEx allows you to perform a sub-document Remove operation with flags
func (set *MutateInBuilder) RemoveEx(path string, flags SubdocFlag) *MutateInBuilder {
if path == "" {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpDeleteDoc,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpDelete,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
}
set.ops = append(set.ops, op)
return set
}
// Remove adds a remove operation to this mutation operation set.
func (set *MutateInBuilder) Remove(path string) *MutateInBuilder {
return set.RemoveEx(path, SubdocFlagNone)
}
// ArrayPrependEx allows you to perform a sub-document ArrayPrepend operation with flags
func (set *MutateInBuilder) ArrayPrependEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayPrependValue(path, set.marshalValue(value), flags)
}
// ArrayPrepend adds an element to the beginning (i.e. left) of an array
func (set *MutateInBuilder) ArrayPrepend(path string, value interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.ArrayPrependEx(path, value, flags)
}
func (set *MutateInBuilder) arrayPrependValue(path string, bytes []byte, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpArrayPushFirst,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: bytes,
}
set.ops = append(set.ops, op)
return set
}
// ArrayAppendEx allows you to perform a sub-document ArrayAppend operation with flags
func (set *MutateInBuilder) ArrayAppendEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayAppendValue(path, set.marshalValue(value), flags)
}
// ArrayAppend adds an element to the end (i.e. right) of an array
func (set *MutateInBuilder) ArrayAppend(path string, value interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.ArrayAppendEx(path, value, flags)
}
func (set *MutateInBuilder) arrayAppendValue(path string, bytes []byte, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpArrayPushLast,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: bytes,
}
set.ops = append(set.ops, op)
return set
}
// ArrayInsertEx allows you to perform a sub-document ArrayInsert operation with flags
func (set *MutateInBuilder) ArrayInsertEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayInsertValue(path, set.marshalValue(value), flags)
}
// ArrayInsert inserts an element at a given position within an array. The position should be
// specified as part of the path, e.g. path.to.array[3]
func (set *MutateInBuilder) ArrayInsert(path string, value interface{}) *MutateInBuilder {
return set.ArrayInsertEx(path, value, SubdocFlagNone)
}
func (set *MutateInBuilder) arrayInsertValue(path string, bytes []byte, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpArrayInsert,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: bytes,
}
set.ops = append(set.ops, op)
return set
}
// ArrayAppendMultiEx allows you to perform a sub-document ArrayAppendMulti operation with flags
func (set *MutateInBuilder) ArrayAppendMultiEx(path string, values interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayAppendValue(path, set.marshalArrayMulti(values), flags)
}
// ArrayAppendMulti adds multiple values as elements to an array.
// `values` must be an array type
// ArrayAppendMulti("path", []int{1,2,3,4}, true) =>
// "path" [..., 1,2,3,4]
//
// This is a more efficient version (at both the network and server levels)
// of doing
// ArrayAppend("path", 1, true).ArrayAppend("path", 2, true).ArrayAppend("path", 3, true)
//
// See ArrayAppend() for more information
func (set *MutateInBuilder) ArrayAppendMulti(path string, values interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.ArrayAppendMultiEx(path, values, flags)
}
// ArrayPrependMultiEx allows you to perform a sub-document ArrayPrependMulti operation with flags
func (set *MutateInBuilder) ArrayPrependMultiEx(path string, values interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayPrependValue(path, set.marshalArrayMulti(values), flags)
}
// ArrayPrependMulti adds multiple values at the beginning of an array.
// See ArrayAppendMulti for more information about multiple element operations
// and ArrayPrepend for the semantics of this operation
func (set *MutateInBuilder) ArrayPrependMulti(path string, values interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.ArrayPrependMultiEx(path, values, flags)
}
// ArrayInsertMultiEx allows you to perform a sub-document ArrayInsertMulti operation with flags
func (set *MutateInBuilder) ArrayInsertMultiEx(path string, values interface{}, flags SubdocFlag) *MutateInBuilder {
return set.arrayInsertValue(path, set.marshalArrayMulti(values), flags)
}
// ArrayInsertMulti inserts multiple elements at a specified position within the
// array. See ArrayAppendMulti for more information about multiple element
// operations, and ArrayInsert for more information about array insertion operations
func (set *MutateInBuilder) ArrayInsertMulti(path string, values interface{}) *MutateInBuilder {
return set.ArrayInsertMultiEx(path, values, SubdocFlagNone)
}
// ArrayAddUniqueEx allows you to perform a sub-document ArrayAddUnique operation with flags
func (set *MutateInBuilder) ArrayAddUniqueEx(path string, value interface{}, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpArrayAddUnique,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(value),
}
set.ops = append(set.ops, op)
return set
}
// ArrayAddUnique adds an array add-unique operation to this mutation operation set.
func (set *MutateInBuilder) ArrayAddUnique(path string, value interface{}, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.ArrayAddUniqueEx(path, value, flags)
}
// CounterEx allows you to perform a sub-document Counter operation with flags
func (set *MutateInBuilder) CounterEx(path string, delta int64, flags SubdocFlag) *MutateInBuilder {
op := gocbcore.SubDocOp{
Op: gocbcore.SubDocOpCounter,
Path: path,
Flags: gocbcore.SubdocFlag(flags),
Value: set.marshalValue(delta),
}
set.ops = append(set.ops, op)
return set
}
// Counter adds a counter operation to this mutation operation set.
func (set *MutateInBuilder) Counter(path string, delta int64, createParents bool) *MutateInBuilder {
var flags SubdocFlag
if createParents {
flags |= SubdocFlagCreatePath
}
return set.CounterEx(path, delta, flags)
}
func (b *Bucket) mutateIn(tracectx opentracing.SpanContext, set *MutateInBuilder) (resOut *DocumentFragment, errOut error) {
// Perform the base operation
res, err := b.mutateInBase(tracectx, set)
if err != nil {
return res, err
}
// Skip durability if there was none set
if set.replicaTo == 0 && set.persistTo == 0 {
return res, err
}
// Attempt to satisfy the durability requirements
return res, b.durability(tracectx, set.name, res.cas, res.mt, set.replicaTo, set.persistTo, false)
}
func (b *Bucket) mutateInBase(tracectx opentracing.SpanContext, set *MutateInBuilder) (resOut *DocumentFragment, errOut error) {
if tracectx == nil {
mispan := b.startKvOpTrace(set.opName)
defer mispan.Finish()
tracectx = mispan.Context()
}
errOut = set.errs.get()
if errOut != nil {
return
}
signal := make(chan bool, 1)
op, err := b.client.MutateInEx(gocbcore.MutateInOptions{
Key: []byte(set.name),
Flags: set.flags,
Cas: set.cas,
Expiry: set.expiry,
Ops: set.ops,
TraceContext: tracectx,
}, func(res *gocbcore.MutateInResult, err error) {
errOut = err
if res != nil {
resSet := &DocumentFragment{
cas: Cas(res.Cas),
mt: MutationToken{res.MutationToken, b},
}
resSet.contents = make([]subDocResult, len(res.Ops))
for i, opRes := range res.Ops {
resSet.contents[i].path = set.ops[i].Path
resSet.contents[i].err = opRes.Err
if opRes.Value != nil {
resSet.contents[i].data = append([]byte(nil), opRes.Value...)
}
}
resOut = resSet
}
signal <- true
})
if err != nil {
return nil, err
}
timeoutTmr := gocbcore.AcquireTimer(b.opTimeout)
select {
case <-signal:
gocbcore.ReleaseTimer(timeoutTmr, false)
return
case <-timeoutTmr.C:
gocbcore.ReleaseTimer(timeoutTmr, true)
if !op.Cancel() {
<-signal
return
}
return nil, ErrTimeout
}
}
func (b *Bucket) startMutateIn(opName string, key string, flags SubdocDocFlag, cas Cas, expiry uint32, replicaTo, persistTo uint) *MutateInBuilder {
return &MutateInBuilder{
bucket: b,
opName: opName,
name: key,
flags: gocbcore.SubdocDocFlag(flags),
cas: gocbcore.Cas(cas),
expiry: expiry,
replicaTo: replicaTo,
persistTo: persistTo,
}
}
// MutateInEx creates a sub-document mutation operation builder.
func (b *Bucket) MutateInEx(key string, flags SubdocDocFlag, cas Cas, expiry uint32) *MutateInBuilder {
return b.startMutateIn("MutateInEx", key, flags, cas, expiry, 0, 0)
}
// MutateInExDura creates a sub-document mutation operation builder with durability.
func (b *Bucket) MutateInExDura(key string, flags SubdocDocFlag, cas Cas, expiry uint32, replicaTo, persistTo uint) *MutateInBuilder {
return b.startMutateIn("MutateInExDura", key, flags, cas, expiry, replicaTo, persistTo)
}
// MutateIn creates a sub-document mutation operation builder.
func (b *Bucket) MutateIn(key string, cas Cas, expiry uint32) *MutateInBuilder {
return b.startMutateIn("MutateIn", key, 0, cas, expiry, 0, 0)
}
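A hedged usage sketch (not part of the vendored file) of a typical mutation chain using only methods defined in this file; the key, paths, and values are illustrative, and bucket is assumed to be a *gocb.Bucket from gopkg.in/couchbase/gocb.v1.
func exampleMutateIn(bucket *gocb.Bucket) error {
	frag, err := bucket.MutateIn("user::123", 0, 0). // cas 0 skips the CAS check, expiry 0 means no TTL
		Upsert("address.city", "Berlin", true).      // create parent paths if they are missing
		ArrayAppend("logins", 1574380800, true).     // append to (or create) the array at this path
		Counter("visits", 1, true).                  // atomically increment the numeric path
		Execute()
	if err != nil {
		return err
	}
	_ = frag // the DocumentFragment carries the resulting Cas and per-operation results
	return nil
}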

85
vendor/gopkg.in/couchbase/gocb.v1/bucket_token.go generated vendored Normal file

@ -0,0 +1,85 @@
package gocb
// RemoveMt performs a Remove operation and includes MutationToken in the results.
func (b *Bucket) RemoveMt(key string, cas Cas) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("RemoveMt")
defer span.Finish()
return b.remove(span.Context(), key, cas)
}
// UpsertMt performs an Upsert operation and includes MutationToken in the results.
func (b *Bucket) UpsertMt(key string, value interface{}, expiry uint32) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("UpsertMt")
defer span.Finish()
return b.upsert(span.Context(), key, value, expiry)
}
// InsertMt performs an Insert operation and includes MutationToken in the results.
func (b *Bucket) InsertMt(key string, value interface{}, expiry uint32) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("InsertMt")
defer span.Finish()
return b.insert(span.Context(), key, value, expiry)
}
// ReplaceMt performs a Replace operation and includes MutationToken in the results.
func (b *Bucket) ReplaceMt(key string, value interface{}, cas Cas, expiry uint32) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("ReplaceMt")
defer span.Finish()
return b.replace(span.Context(), key, value, cas, expiry)
}
// AppendMt performs an Append operation and includes MutationToken in the results.
func (b *Bucket) AppendMt(key, value string) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("AppendMt")
defer span.Finish()
return b.append(span.Context(), key, value)
}
// PrependMt performs a Prepend operation and includes MutationToken in the results.
func (b *Bucket) PrependMt(key, value string) (Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("PrependMt")
defer span.Finish()
return b.prepend(span.Context(), key, value)
}
// CounterMt performs a Counter operation and includes MutationToken in the results.
func (b *Bucket) CounterMt(key string, delta, initial int64, expiry uint32) (uint64, Cas, MutationToken, error) {
if !b.mtEnabled {
panic("You must use OpenBucketMt with Mt operation variants.")
}
span := b.startKvOpTrace("CounterMt")
defer span.Finish()
return b.counter(span.Context(), key, delta, initial, expiry)
}
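A hedged usage sketch (not part of the vendored file) of the Mt variants above. They require a bucket opened with OpenBucketWithMt (shown later in this diff, in cluster.go); otherwise they panic.
func exampleMtVariants(cluster *gocb.Cluster) error {
	bucket, err := cluster.OpenBucketWithMt("default", "")
	if err != nil {
		return err
	}
	cas, token, err := bucket.UpsertMt("user::123", map[string]string{"name": "jane"}, 0)
	if err != nil {
		return err
	}
	_ = cas
	_ = token // the MutationToken can be fed into query consistency or durability requirements
	return nil
}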

230
vendor/gopkg.in/couchbase/gocb.v1/bucket_viewquery.go generated vendored Normal file

@ -0,0 +1,230 @@
package gocb
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/opentracing/opentracing-go"
)
type viewError struct {
Message string `json:"message"`
Reason string `json:"reason"`
}
type viewResponse struct {
TotalRows int `json:"total_rows,omitempty"`
Rows []json.RawMessage `json:"rows,omitempty"`
Error string `json:"error,omitempty"`
Reason string `json:"reason,omitempty"`
Errors []viewError `json:"errors,omitempty"`
}
func (e *viewError) Error() string {
return e.Message + " - " + e.Reason
}
// ViewResults implements an iterator interface which can be used to iterate over the rows of the query results.
type ViewResults interface {
One(valuePtr interface{}) error
Next(valuePtr interface{}) bool
NextBytes() []byte
Close() error
}
// ViewResultMetrics allows access to the TotalRows value from the view response. This is
// implemented as an additional interface to maintain ABI compatibility for the 1.x series.
type ViewResultMetrics interface {
TotalRows() int
}
type viewResults struct {
index int
rows []json.RawMessage
totalRows int
err error
endErr error
}
func (r *viewResults) Next(valuePtr interface{}) bool {
if r.err != nil {
return false
}
row := r.NextBytes()
if row == nil {
return false
}
r.err = json.Unmarshal(row, valuePtr)
if r.err != nil {
return false
}
return true
}
func (r *viewResults) NextBytes() []byte {
if r.err != nil {
return nil
}
if r.index+1 >= len(r.rows) {
return nil
}
r.index++
return r.rows[r.index]
}
func (r *viewResults) Close() error {
if r.err != nil {
return r.err
}
if r.endErr != nil {
return r.endErr
}
return nil
}
func (r *viewResults) One(valuePtr interface{}) error {
if !r.Next(valuePtr) {
err := r.Close()
if err != nil {
return err
}
return ErrNoResults
}
// Ignore any errors occurring after we already have our result
err := r.Close()
if err != nil {
// Return no error as we got the one result already.
return nil
}
return nil
}
func (r *viewResults) TotalRows() int {
return r.totalRows
}
func (b *Bucket) executeViewQuery(tracectx opentracing.SpanContext, viewType, ddoc, viewName string, options url.Values) (ViewResults, error) {
capiEp, err := b.getViewEp()
if err != nil {
return nil, err
}
reqUri := fmt.Sprintf("%s/_design/%s/%s/%s?%s", capiEp, ddoc, viewType, viewName, options.Encode())
req, err := http.NewRequest("GET", reqUri, nil)
if err != nil {
return nil, err
}
if b.cluster.auth != nil {
userPass, err := getSingleCredential(b.cluster.auth, AuthCredsRequest{
Service: CapiService,
Endpoint: capiEp,
Bucket: b.name,
})
if err != nil {
return nil, err
}
req.SetBasicAuth(userPass.Username, userPass.Password)
} else {
req.SetBasicAuth(b.name, b.password)
}
dtrace := b.tracer.StartSpan("dispatch",
opentracing.ChildOf(tracectx))
resp, err := doHttpWithTimeout(b.client.HttpClient(), req, b.viewTimeout)
if err != nil {
dtrace.Finish()
return nil, err
}
dtrace.Finish()
strace := b.tracer.StartSpan("streaming",
opentracing.ChildOf(tracectx))
viewResp := viewResponse{}
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&viewResp)
if err != nil {
strace.Finish()
return nil, err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
strace.Finish()
if resp.StatusCode != 200 {
if viewResp.Error != "" {
return nil, &viewError{
Message: viewResp.Error,
Reason: viewResp.Reason,
}
}
return nil, &viewError{
Message: "HTTP Error",
Reason: fmt.Sprintf("Status code was %d.", resp.StatusCode),
}
}
var endErrs MultiError
for _, endErr := range viewResp.Errors {
endErrs.add(&viewError{
Message: endErr.Message,
Reason: endErr.Reason,
})
}
return &viewResults{
index: -1,
rows: viewResp.Rows,
totalRows: viewResp.TotalRows,
endErr: endErrs.get(),
}, nil
}
// ExecuteViewQuery performs a view query and returns a list of rows or an error.
func (b *Bucket) ExecuteViewQuery(q *ViewQuery) (ViewResults, error) {
span := b.tracer.StartSpan("ExecuteViewQuery",
opentracing.Tag{Key: "couchbase.service", Value: "views"})
defer span.Finish()
ddoc, name, opts, err := q.getInfo()
if err != nil {
return nil, err
}
return b.executeViewQuery(span.Context(), "_view", ddoc, name, opts)
}
// ExecuteSpatialQuery performs a spatial query and returns a list of rows or an error.
func (b *Bucket) ExecuteSpatialQuery(q *SpatialQuery) (ViewResults, error) {
span := b.tracer.StartSpan("ExecuteSpatialQuery",
opentracing.Tag{Key: "couchbase.service", Value: "views"})
defer span.Finish()
ddoc, name, opts, err := q.getInfo()
if err != nil {
return nil, err
}
return b.executeViewQuery(span.Context(), "_spatial", ddoc, name, opts)
}
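A hedged usage sketch (not part of the vendored file) of ExecuteViewQuery and the ViewResults iterator above. NewViewQuery and its Limit option live elsewhere in gocb 1.x and are assumptions here.
func exampleViewQuery(bucket *gocb.Bucket) error {
	q := gocb.NewViewQuery("beers", "by_name").Limit(10) // assumed helpers from gocb 1.x
	rows, err := bucket.ExecuteViewQuery(q)
	if err != nil {
		return err
	}
	var row interface{}
	for rows.Next(&row) { // Next unmarshals each row into the given pointer
		// process row here
	}
	return rows.Close() // Close reports any error recorded during iteration
}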

512
vendor/gopkg.in/couchbase/gocb.v1/bucketmgr.go generated vendored Normal file

@ -0,0 +1,512 @@
package gocb
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
)
// View represents a Couchbase view within a design document.
type View struct {
Map string `json:"map,omitempty"`
Reduce string `json:"reduce,omitempty"`
}
func (v View) hasReduce() bool {
return v.Reduce != ""
}
// DesignDocument represents a Couchbase design document containing multiple views.
type DesignDocument struct {
Name string `json:"-"`
Views map[string]View `json:"views,omitempty"`
SpatialViews map[string]View `json:"spatial,omitempty"`
}
// IndexInfo represents a Couchbase GSI index.
type IndexInfo struct {
Name string `json:"name"`
IsPrimary bool `json:"is_primary"`
Type IndexType `json:"using"`
State string `json:"state"`
Keyspace string `json:"keyspace_id"`
Namespace string `json:"namespace_id"`
IndexKey []string `json:"index_key"`
}
// BucketManager provides methods for performing bucket management operations.
// See ClusterManager for methods that allow creating and removing buckets themselves.
type BucketManager struct {
bucket *Bucket
username string
password string
}
func (bm *BucketManager) capiRequest(method, uri, contentType string, body io.Reader) (*http.Response, error) {
if contentType == "" && body != nil {
panic("Content-type must be specified for non-null body.")
}
viewEp, err := bm.bucket.getViewEp()
if err != nil {
return nil, err
}
req, err := http.NewRequest(method, viewEp+uri, body)
if contentType != "" {
req.Header.Add("Content-Type", contentType)
}
if err != nil {
return nil, err
}
if bm.username != "" || bm.password != "" {
req.SetBasicAuth(bm.username, bm.password)
}
return bm.bucket.client.HttpClient().Do(req)
}
func (bm *BucketManager) mgmtRequest(method, uri, contentType string, body io.Reader) (*http.Response, error) {
if contentType == "" && body != nil {
panic("Content-type must be specified for non-null body.")
}
mgmtEp, err := bm.bucket.getMgmtEp()
if err != nil {
return nil, err
}
req, err := http.NewRequest(method, mgmtEp+uri, body)
if err != nil {
return nil, err
}
if contentType != "" {
req.Header.Add("Content-Type", contentType)
}
if bm.username != "" || bm.password != "" {
req.SetBasicAuth(bm.username, bm.password)
}
return bm.bucket.client.HttpClient().Do(req)
}
// Flush will delete all of the data from a bucket.
// Keep in mind that you must have flushing enabled in the bucket's configuration.
func (bm *BucketManager) Flush() error {
reqUri := fmt.Sprintf("/pools/default/buckets/%s/controller/doFlush", bm.bucket.name)
resp, err := bm.mgmtRequest("POST", reqUri, "", nil)
if err != nil {
return err
}
if resp.StatusCode != 200 {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
// handles responses such as unauthorized, which do not return any error body or data
if len(data) == 0 {
return clientError{message: fmt.Sprintf("Status Code: %d", resp.StatusCode)}
}
return clientError{message: fmt.Sprintf("Message: %s. Status Code: %d", string(data), resp.StatusCode)}
}
return nil
}
// GetDesignDocument retrieves a single design document for the given bucket.
func (bm *BucketManager) GetDesignDocument(name string) (*DesignDocument, error) {
reqUri := fmt.Sprintf("/_design/%s", name)
resp, err := bm.capiRequest("GET", reqUri, "", nil)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
// handles responses such as unauthorized, which do not return any error body or data
if len(data) == 0 {
return nil, clientError{message: fmt.Sprintf("Status Code: %d", resp.StatusCode)}
}
return nil, clientError{message: fmt.Sprintf("Message: %s. Status Code: %d", string(data), resp.StatusCode)}
}
ddocObj := DesignDocument{}
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&ddocObj)
if err != nil {
return nil, err
}
ddocObj.Name = name
return &ddocObj, nil
}
// GetDesignDocuments will retrieve all design documents for the given bucket.
func (bm *BucketManager) GetDesignDocuments() ([]*DesignDocument, error) {
reqUri := fmt.Sprintf("/pools/default/buckets/%s/ddocs", bm.bucket.name)
resp, err := bm.mgmtRequest("GET", reqUri, "", nil)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
// handles responses such as unauthorized, which do not return any error body or data
if len(data) == 0 {
return nil, clientError{message: fmt.Sprintf("Status Code: %d", resp.StatusCode)}
}
return nil, clientError{message: fmt.Sprintf("Message: %s. Status Code: %d", string(data), resp.StatusCode)}
}
var ddocsObj struct {
Rows []struct {
Doc struct {
Meta struct {
Id string
}
Json DesignDocument
}
}
}
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&ddocsObj)
if err != nil {
return nil, err
}
var ddocs []*DesignDocument
for index, ddocData := range ddocsObj.Rows {
ddoc := &ddocsObj.Rows[index].Doc.Json
ddoc.Name = ddocData.Doc.Meta.Id[8:]
ddocs = append(ddocs, ddoc)
}
return ddocs, nil
}
// InsertDesignDocument inserts a design document into the given bucket.
func (bm *BucketManager) InsertDesignDocument(ddoc *DesignDocument) error {
oldDdoc, err := bm.GetDesignDocument(ddoc.Name)
if oldDdoc != nil || err == nil {
return clientError{"Design document already exists"}
}
return bm.UpsertDesignDocument(ddoc)
}
// UpsertDesignDocument will insert a design document into the given bucket, or update
// an existing design document with the same name.
func (bm *BucketManager) UpsertDesignDocument(ddoc *DesignDocument) error {
reqUri := fmt.Sprintf("/_design/%s", ddoc.Name)
data, err := json.Marshal(&ddoc)
if err != nil {
return err
}
resp, err := bm.capiRequest("PUT", reqUri, "application/json", bytes.NewReader(data))
if err != nil {
return err
}
if resp.StatusCode != 201 {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
// handles responses such as unauthorized, which do not return any error body or data
if len(data) == 0 {
return clientError{message: fmt.Sprintf("Status Code: %d", resp.StatusCode)}
}
return clientError{message: fmt.Sprintf("Message: %s. Status Code: %d", string(data), resp.StatusCode)}
}
return nil
}
// RemoveDesignDocument will remove a design document from the given bucket.
func (bm *BucketManager) RemoveDesignDocument(name string) error {
reqUri := fmt.Sprintf("/_design/%s", name)
resp, err := bm.capiRequest("DELETE", reqUri, "", nil)
if err != nil {
return err
}
if resp.StatusCode != 200 {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = resp.Body.Close()
if err != nil {
logDebugf("Failed to close socket (%s)", err)
}
// handles responses such as unauthorized, which do not return any error body or data
if len(data) == 0 {
return clientError{message: fmt.Sprintf("Status Code: %d", resp.StatusCode)}
}
return clientError{message: fmt.Sprintf("Message: %s. Status Code: %d", string(data), resp.StatusCode)}
}
return nil
}
func (bm *BucketManager) createIndex(indexName string, fields []string, ignoreIfExists, deferred bool) error {
var qs string
if len(fields) == 0 {
qs += "CREATE PRIMARY INDEX"
} else {
qs += "CREATE INDEX"
}
if indexName != "" {
qs += " `" + indexName + "`"
}
qs += " ON `" + bm.bucket.name + "`"
if len(fields) > 0 {
qs += " ("
for i := 0; i < len(fields); i++ {
if i > 0 {
qs += ", "
}
qs += "`" + fields[i] + "`"
}
qs += ")"
}
if deferred {
qs += " WITH {\"defer_build\": true}"
}
rows, err := bm.bucket.ExecuteN1qlQuery(NewN1qlQuery(qs), nil)
if err != nil {
if strings.Contains(err.Error(), "already exist") {
if ignoreIfExists {
return nil
}
return ErrIndexAlreadyExists
}
return err
}
return rows.Close()
}
// CreateIndex creates an index over the specified fields.
func (bm *BucketManager) CreateIndex(indexName string, fields []string, ignoreIfExists, deferred bool) error {
if indexName == "" {
return ErrIndexInvalidName
}
if len(fields) <= 0 {
return ErrIndexNoFields
}
return bm.createIndex(indexName, fields, ignoreIfExists, deferred)
}
// CreatePrimaryIndex creates a primary index. An empty customName uses the default naming.
func (bm *BucketManager) CreatePrimaryIndex(customName string, ignoreIfExists, deferred bool) error {
return bm.createIndex(customName, nil, ignoreIfExists, deferred)
}
func (bm *BucketManager) dropIndex(indexName string, ignoreIfNotExists bool) error {
var qs string
if indexName == "" {
qs += "DROP PRIMARY INDEX ON `" + bm.bucket.name + "`"
} else {
qs += "DROP INDEX `" + bm.bucket.name + "`.`" + indexName + "`"
}
rows, err := bm.bucket.ExecuteN1qlQuery(NewN1qlQuery(qs), nil)
if err != nil {
if strings.Contains(err.Error(), "not found") {
if ignoreIfNotExists {
return nil
}
return ErrIndexNotFound
}
return err
}
return rows.Close()
}
// DropIndex drops a specific index by name.
func (bm *BucketManager) DropIndex(indexName string, ignoreIfNotExists bool) error {
if indexName == "" {
return ErrIndexInvalidName
}
return bm.dropIndex(indexName, ignoreIfNotExists)
}
// DropPrimaryIndex drops the primary index. Pass an empty customName for unnamed primary indexes.
func (bm *BucketManager) DropPrimaryIndex(customName string, ignoreIfNotExists bool) error {
return bm.dropIndex(customName, ignoreIfNotExists)
}
// GetIndexes returns a list of all currently registered indexes.
func (bm *BucketManager) GetIndexes() ([]IndexInfo, error) {
q := NewN1qlQuery("SELECT `indexes`.* FROM system:indexes")
rows, err := bm.bucket.ExecuteN1qlQuery(q, nil)
if err != nil {
return nil, err
}
var indexes []IndexInfo
var index IndexInfo
for rows.Next(&index) {
indexes = append(indexes, index)
index = IndexInfo{}
}
if err := rows.Close(); err != nil {
return nil, err
}
return indexes, nil
}
// BuildDeferredIndexes builds all indexes which are currently in deferred state.
func (bm *BucketManager) BuildDeferredIndexes() ([]string, error) {
indexList, err := bm.GetIndexes()
if err != nil {
return nil, err
}
var deferredList []string
for i := 0; i < len(indexList); i++ {
var index = indexList[i]
if index.State == "deferred" || index.State == "pending" {
deferredList = append(deferredList, index.Name)
}
}
if len(deferredList) == 0 {
// Don't try to build an empty index list
return nil, nil
}
var qs string
qs += "BUILD INDEX ON `" + bm.bucket.name + "`("
for i := 0; i < len(deferredList); i++ {
if i > 0 {
qs += ", "
}
qs += "`" + deferredList[i] + "`"
}
qs += ")"
rows, err := bm.bucket.ExecuteN1qlQuery(NewN1qlQuery(qs), nil)
if err != nil {
return nil, err
}
if err := rows.Close(); err != nil {
return nil, err
}
return deferredList, nil
}
func checkIndexesActive(indexes []IndexInfo, checkList []string) (bool, error) {
var checkIndexes []IndexInfo
for i := 0; i < len(checkList); i++ {
indexName := checkList[i]
for j := 0; j < len(indexes); j++ {
if indexes[j].Name == indexName {
checkIndexes = append(checkIndexes, indexes[j])
break
}
}
}
if len(checkIndexes) != len(checkList) {
return false, ErrIndexNotFound
}
for i := 0; i < len(checkIndexes); i++ {
if checkIndexes[i].State != "online" {
return false, nil
}
}
return true, nil
}
// WatchIndexes waits for a set of indexes to come online
func (bm *BucketManager) WatchIndexes(watchList []string, watchPrimary bool, timeout time.Duration) error {
if watchPrimary {
watchList = append(watchList, "#primary")
}
curInterval := 50 * time.Millisecond
timeoutTime := time.Now().Add(timeout)
for {
indexes, err := bm.GetIndexes()
if err != nil {
return err
}
allOnline, err := checkIndexesActive(indexes, watchList)
if err != nil {
return err
}
if allOnline {
break
}
curInterval += 500 * time.Millisecond
if curInterval > 1000 {
curInterval = 1000
}
if time.Now().Add(curInterval).After(timeoutTime) {
return ErrTimeout
}
// Wait till our next poll interval
time.Sleep(curInterval)
}
return nil
}
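A hedged usage sketch (not part of the vendored file) of the index-management flow above: create a deferred index, build all deferred indexes, then wait for them to come online. Bucket.Manager is assumed from elsewhere in gocb 1.x, and the sketch assumes the standard library "time" package is imported.
func exampleIndexes(bucket *gocb.Bucket) error {
	mgr := bucket.Manager("", "") // Manager is defined elsewhere in gocb 1.x
	// Create a deferred secondary index; ignore the error if it already exists.
	if err := mgr.CreateIndex("ix_name", []string{"name"}, true, true); err != nil {
		return err
	}
	if _, err := mgr.BuildDeferredIndexes(); err != nil {
		return err
	}
	return mgr.WatchIndexes([]string{"ix_name"}, false, 30*time.Second)
}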

449
vendor/gopkg.in/couchbase/gocb.v1/cluster.go generated vendored Normal file

@ -0,0 +1,449 @@
package gocb
import (
"errors"
"fmt"
"net/http"
"strconv"
"sync"
"time"
"github.com/opentracing/opentracing-go"
"gopkg.in/couchbase/gocbcore.v7"
"gopkg.in/couchbaselabs/gocbconnstr.v1"
)
// Cluster represents a connection to a specific Couchbase cluster.
type Cluster struct {
auth Authenticator
agentConfig gocbcore.AgentConfig
n1qlTimeout time.Duration
ftsTimeout time.Duration
analyticsTimeout time.Duration
clusterLock sync.RWMutex
queryCache map[string]*n1qlCache
bucketList []*Bucket
httpCli *http.Client
}
// Connect creates a new Cluster object for a specific cluster.
// These options are copied from (and should stay in sync with) the gocbcore agent.FromConnStr comment.
// Supported connSpecStr options are:
// cacertpath (string) - Path to the CA certificate
// certpath (string) - Path to your authentication certificate
// keypath (string) - Path to your authentication key
// config_total_timeout (int) - Maximum period to attempt to connect to cluster in ms.
// config_node_timeout (int) - Maximum period to attempt to connect to a node in ms.
// http_redial_period (int) - Maximum period to keep HTTP config connections open in ms.
// http_retry_delay (int) - Period to wait between retrying nodes for HTTP config in ms.
// config_poll_floor_interval (int) - Minimum time to wait between fetching configs via CCCP in ms.
// config_poll_interval (int) - Period to wait between CCCP config polling in ms.
// kv_pool_size (int) - The number of connections to establish per node.
// max_queue_size (int) - The maximum size of the operation queues per node.
// use_kverrmaps (bool) - Whether to enable error maps from the server.
// use_enhanced_errors (bool) - Whether to enable enhanced error information.
// fetch_mutation_tokens (bool) - Whether to fetch mutation tokens for operations.
// compression (bool) - Whether to enable network-wise compression of documents.
// compression_min_size (int) - The minimal size of the document to consider compression.
// compression_min_ratio (float64) - The minimal compress ratio (compressed / original) for the document to be sent compressed.
// server_duration (bool) - Whether to enable fetching server operation durations.
// http_max_idle_conns (int) - Maximum number of idle http connections in the pool.
// http_max_idle_conns_per_host (int) - Maximum number of idle http connections in the pool per host.
// http_idle_conn_timeout (int) - Maximum length of time for an idle connection to stay in the pool in ms.
// network (string) - The network type to use.
// orphaned_response_logging (bool) - Whether to enable orphan response logging.
// orphaned_response_logging_interval (int) - How often to log orphan responses in ms.
// orphaned_response_logging_sample_size (int) - The number of samples to include in each orphaned response log.
// operation_tracing (bool) - Whether to enable tracing.
// n1ql_timeout (int) - Maximum execution time for n1ql queries in ms.
// fts_timeout (int) - Maximum execution time for fts searches in ms.
// analytics_timeout (int) - Maximum execution time for analytics queries in ms.
func Connect(connSpecStr string) (*Cluster, error) {
spec, err := gocbconnstr.Parse(connSpecStr)
if err != nil {
return nil, err
}
if spec.Bucket != "" {
return nil, errors.New("Connection string passed to Connect() must not have any bucket specified!")
}
fetchOption := func(name string) (string, bool) {
optValue := spec.Options[name]
if len(optValue) == 0 {
return "", false
}
return optValue[len(optValue)-1], true
}
config := gocbcore.AgentConfig{
UserString: "gocb/" + Version(),
ConnectTimeout: 60000 * time.Millisecond,
ServerConnectTimeout: 7000 * time.Millisecond,
NmvRetryDelay: 100 * time.Millisecond,
UseKvErrorMaps: true,
UseDurations: true,
NoRootTraceSpans: true,
UseCompression: true,
UseZombieLogger: true,
}
err = config.FromConnStr(connSpecStr)
if err != nil {
return nil, err
}
useTracing := true
if valStr, ok := fetchOption("operation_tracing"); ok {
val, err := strconv.ParseBool(valStr)
if err != nil {
return nil, fmt.Errorf("operation_tracing option must be a boolean")
}
useTracing = val
}
var initialTracer opentracing.Tracer
if useTracing {
initialTracer = &ThresholdLoggingTracer{}
} else {
initialTracer = &opentracing.NoopTracer{}
}
config.Tracer = initialTracer
tracerAddRef(initialTracer)
httpCli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: config.TlsConfig,
},
}
cluster := &Cluster{
agentConfig: config,
n1qlTimeout: 75 * time.Second,
ftsTimeout: 75 * time.Second,
analyticsTimeout: 75 * time.Second,
httpCli: httpCli,
queryCache: make(map[string]*n1qlCache),
}
if valStr, ok := fetchOption("n1ql_timeout"); ok {
val, err := strconv.ParseInt(valStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("n1ql_timeout option must be a number")
}
cluster.n1qlTimeout = time.Duration(val) * time.Millisecond
}
if valStr, ok := fetchOption("fts_timeout"); ok {
val, err := strconv.ParseInt(valStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("fts_timeout option must be a number")
}
cluster.ftsTimeout = time.Duration(val) * time.Millisecond
}
if valStr, ok := fetchOption("analytics_timeout"); ok {
val, err := strconv.ParseInt(valStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("analytics_timeout option must be a number")
}
cluster.analyticsTimeout = time.Duration(val) * time.Millisecond
}
return cluster, nil
}
// SetTracer allows you to specify a custom tracer to use for this cluster.
// EXPERIMENTAL
func (c *Cluster) SetTracer(tracer opentracing.Tracer) {
if c.agentConfig.Tracer != nil {
tracerDecRef(c.agentConfig.Tracer)
}
tracerAddRef(tracer)
c.agentConfig.Tracer = tracer
}
// EnhancedErrors returns the current enhanced error message state.
func (c *Cluster) EnhancedErrors() bool {
return c.agentConfig.UseEnhancedErrors
}
// SetEnhancedErrors sets the current enhanced error message state.
func (c *Cluster) SetEnhancedErrors(enabled bool) {
c.agentConfig.UseEnhancedErrors = enabled
}
// ConnectTimeout returns the maximum time to wait when attempting to connect to a bucket.
func (c *Cluster) ConnectTimeout() time.Duration {
return c.agentConfig.ConnectTimeout
}
// SetConnectTimeout sets the maximum time to wait when attempting to connect to a bucket.
func (c *Cluster) SetConnectTimeout(timeout time.Duration) {
c.agentConfig.ConnectTimeout = timeout
}
// ServerConnectTimeout returns the maximum time to attempt to connect to a single node.
func (c *Cluster) ServerConnectTimeout() time.Duration {
return c.agentConfig.ServerConnectTimeout
}
// SetServerConnectTimeout sets the maximum time to attempt to connect to a single node.
func (c *Cluster) SetServerConnectTimeout(timeout time.Duration) {
c.agentConfig.ServerConnectTimeout = timeout
}
// N1qlTimeout returns the maximum time to wait for a cluster-level N1QL query to complete.
func (c *Cluster) N1qlTimeout() time.Duration {
return c.n1qlTimeout
}
// SetN1qlTimeout sets the maximum time to wait for a cluster-level N1QL query to complete.
func (c *Cluster) SetN1qlTimeout(timeout time.Duration) {
c.n1qlTimeout = timeout
}
// FtsTimeout returns the maximum time to wait for a cluster-level FTS query to complete.
func (c *Cluster) FtsTimeout() time.Duration {
return c.ftsTimeout
}
// SetFtsTimeout sets the maximum time to wait for a cluster-level FTS query to complete.
func (c *Cluster) SetFtsTimeout(timeout time.Duration) {
c.ftsTimeout = timeout
}
// AnalyticsTimeout returns the maximum time to wait for a cluster-level Analytics query to complete.
func (c *Cluster) AnalyticsTimeout() time.Duration {
return c.analyticsTimeout
}
// SetAnalyticsTimeout sets the maximum time to wait for a cluster-level Analytics query to complete.
func (c *Cluster) SetAnalyticsTimeout(timeout time.Duration) {
c.analyticsTimeout = timeout
}
// NmvRetryDelay returns the time to wait between retrying an operation due to not my vbucket.
func (c *Cluster) NmvRetryDelay() time.Duration {
return c.agentConfig.NmvRetryDelay
}
// SetNmvRetryDelay sets the time to wait between retrying an operation due to not my vbucket.
func (c *Cluster) SetNmvRetryDelay(delay time.Duration) {
c.agentConfig.NmvRetryDelay = delay
}
// InvalidateQueryCache forces the internal cache of prepared queries to be cleared.
func (c *Cluster) InvalidateQueryCache() {
c.clusterLock.Lock()
c.queryCache = make(map[string]*n1qlCache)
c.clusterLock.Unlock()
}
// Close shuts down all buckets in this cluster and invalidates any references this cluster has.
func (c *Cluster) Close() error {
var overallErr error
// We have an upper bound on how many buckets we try
// to close, solely for deadlock prevention
for i := 0; i < 1024; i++ {
c.clusterLock.Lock()
if len(c.bucketList) == 0 {
c.clusterLock.Unlock()
break
}
bucket := c.bucketList[0]
c.clusterLock.Unlock()
err := bucket.Close()
if err != nil && gocbcore.ErrorCause(err) != gocbcore.ErrShutdown {
logWarnf("Failed to close a bucket in cluster close: %s", err)
overallErr = err
}
}
if c.agentConfig.Tracer != nil {
tracerDecRef(c.agentConfig.Tracer)
c.agentConfig.Tracer = nil
}
return overallErr
}
func (c *Cluster) makeAgentConfig(bucket, password string, forceMt bool) (*gocbcore.AgentConfig, error) {
auth := c.auth
useCertificates := c.agentConfig.TlsConfig != nil && len(c.agentConfig.TlsConfig.Certificates) > 0
if useCertificates {
if auth == nil {
return nil, ErrMixedCertAuthentication
}
certAuth, ok := auth.(certAuthenticator)
if !ok || !certAuth.isTlsAuth() {
return nil, ErrMixedCertAuthentication
}
}
if auth == nil {
authMap := make(BucketAuthenticatorMap)
authMap[bucket] = BucketAuthenticator{
Password: password,
}
auth = ClusterAuthenticator{
Buckets: authMap,
}
} else {
if password != "" {
return nil, ErrMixedAuthentication
}
certAuth, ok := auth.(certAuthenticator)
if ok && certAuth.isTlsAuth() && !useCertificates {
return nil, ErrMixedCertAuthentication
}
}
config := c.agentConfig
config.BucketName = bucket
config.Password = password
config.Auth = &coreAuthWrapper{
auth: auth,
bucketName: bucket,
}
if forceMt {
config.UseMutationTokens = true
}
return &config, nil
}
// Authenticate specifies an Authenticator interface to use to authenticate with cluster services.
func (c *Cluster) Authenticate(auth Authenticator) error {
c.auth = auth
return nil
}
func (c *Cluster) openBucket(bucket, password string, forceMt bool) (*Bucket, error) {
agentConfig, err := c.makeAgentConfig(bucket, password, forceMt)
if err != nil {
return nil, err
}
b, err := createBucket(c, agentConfig)
if err != nil {
return nil, err
}
c.clusterLock.Lock()
c.bucketList = append(c.bucketList, b)
c.clusterLock.Unlock()
return b, nil
}
// OpenBucket opens a new connection to the specified bucket.
func (c *Cluster) OpenBucket(bucket, password string) (*Bucket, error) {
return c.openBucket(bucket, password, false)
}
// OpenBucketWithMt opens a new connection to the specified bucket and enables mutation tokens.
// MutationTokens allow you to execute queries and durability requirements with very specific
// operation-level consistency.
func (c *Cluster) OpenBucketWithMt(bucket, password string) (*Bucket, error) {
return c.openBucket(bucket, password, true)
}
func (c *Cluster) closeBucket(bucket *Bucket) {
c.clusterLock.Lock()
for i, e := range c.bucketList {
if e == bucket {
c.bucketList = append(c.bucketList[0:i], c.bucketList[i+1:]...)
break
}
}
c.clusterLock.Unlock()
}
// Manager returns a ClusterManager object for performing cluster management operations on this cluster.
func (c *Cluster) Manager(username, password string) *ClusterManager {
var mgmtHosts []string
for _, host := range c.agentConfig.HttpAddrs {
if c.agentConfig.TlsConfig != nil {
mgmtHosts = append(mgmtHosts, "https://"+host)
} else {
mgmtHosts = append(mgmtHosts, "http://"+host)
}
}
tlsConfig := c.agentConfig.TlsConfig
return &ClusterManager{
hosts: mgmtHosts,
username: username,
password: password,
httpCli: &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
},
cluster: c,
}
}
// StreamingBucket represents a bucket connection used for streaming data over DCP.
type StreamingBucket struct {
client *gocbcore.Agent
}
// IoRouter returns the underlying gocb agent managing connections.
func (b *StreamingBucket) IoRouter() *gocbcore.Agent {
return b.client
}
// OpenStreamingBucket opens a new connection to the specified bucket for the purpose of streaming data.
func (c *Cluster) OpenStreamingBucket(streamName, bucket, password string) (*StreamingBucket, error) {
agentConfig, err := c.makeAgentConfig(bucket, password, false)
if err != nil {
return nil, err
}
cli, err := gocbcore.CreateDcpAgent(agentConfig, streamName, 0)
if err != nil {
return nil, err
}
return &StreamingBucket{
client: cli,
}, nil
}
func (c *Cluster) randomBucket() (*Bucket, error) {
c.clusterLock.RLock()
if len(c.bucketList) == 0 {
c.clusterLock.RUnlock()
return nil, ErrNoOpenBuckets
}
bucket := c.bucketList[0]
c.clusterLock.RUnlock()
return bucket, nil
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// getFtsEp retrieves a search endpoint from a random bucket
func (c *Cluster) getFtsEp() (string, error) {
tmpB, err := c.randomBucket()
if err != nil {
return "", err
}
ftsEp, err := tmpB.getFtsEp()
if err != nil {
return "", err
}
return ftsEp, nil
}
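A hedged usage sketch (not part of the vendored file) tying the Connect options above together: options such as n1ql_timeout and operation_tracing ride on the connection string, and credentials are supplied through Authenticate. PasswordAuthenticator lives elsewhere in gocb 1.x and is an assumption here, as are the host and credentials.
func exampleConnect() (*gocb.Bucket, error) {
	cluster, err := gocb.Connect("couchbase://127.0.0.1?n1ql_timeout=60000&operation_tracing=false")
	if err != nil {
		return nil, err
	}
	// PasswordAuthenticator is an assumed type from gocb 1.x carrying RBAC credentials.
	if err := cluster.Authenticate(gocb.PasswordAuthenticator{
		Username: "Administrator",
		Password: "password",
	}); err != nil {
		return nil, err
	}
	return cluster.OpenBucket("default", "")
}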

Some files were not shown because too many files have changed in this diff.