Merge pull request #149 from totherme/test-framework

Beginnings of a test framework

This commit is contained in:
commit 4f1e692bec
@@ -25,6 +25,24 @@
   revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c"
   version = "v2.4.0"
 
+[[projects]]
+  name = "github.com/emicklei/go-restful-swagger12"
+  packages = ["."]
+  revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
+  version = "1.0.1"
+
+[[projects]]
+  name = "github.com/fsnotify/fsnotify"
+  packages = ["."]
+  revision = "629574ca2a5df945712d3079857300b5e4da0236"
+  version = "v1.4.2"
+
+[[projects]]
+  name = "github.com/ghodss/yaml"
+  packages = ["."]
+  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
+  version = "v1.0.0"
+
 [[projects]]
   branch = "master"
   name = "github.com/go-openapi/jsonpointer"
@@ -61,34 +79,184 @@
   packages = ["."]
   revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/protobuf"
+  packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
+  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/btree"
+  packages = ["."]
+  revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"
+
 [[projects]]
   branch = "master"
   name = "github.com/google/gofuzz"
   packages = ["."]
   revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
 
+[[projects]]
+  name = "github.com/googleapis/gnostic"
+  packages = ["OpenAPIv2","compiler","extensions"]
+  revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
+  version = "v0.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/gregjones/httpcache"
+  packages = [".","diskcache"]
+  revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/hashicorp/hcl"
+  packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
+  revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/howeyc/gopass"
+  packages = ["."]
+  revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
+
+[[projects]]
+  name = "github.com/imdario/mergo"
+  packages = ["."]
+  revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874"
+  version = "0.2.4"
+
+[[projects]]
+  name = "github.com/inconshreveable/mousetrap"
+  packages = ["."]
+  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+  version = "v1.0"
+
+[[projects]]
+  name = "github.com/json-iterator/go"
+  packages = ["."]
+  revision = "f7279a603edee96fe7764d3de9c6ff8cf9970994"
+  version = "1.0.4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/juju/ratelimit"
+  packages = ["."]
+  revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
+
+[[projects]]
+  name = "github.com/magiconair/properties"
+  packages = ["."]
+  revision = "be5ece7dd465ab0765a9682137865547526d1dfb"
+  version = "v1.7.3"
+
 [[projects]]
   branch = "master"
   name = "github.com/mailru/easyjson"
   packages = ["buffer","jlexer","jwriter"]
   revision = "5f62e4f3afa2f576dc86531b7df4d966b19ef8f8"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/mitchellh/go-homedir"
+  packages = ["."]
+  revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/mitchellh/mapstructure"
+  packages = ["."]
+  revision = "06020f85339e21b2478f756a78e295255ffa4d6a"
+
+[[projects]]
+  name = "github.com/onsi/ginkgo"
+  packages = [".","config","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"]
+  revision = "9eda700730cba42af70d53180f9dcce9266bc2bc"
+  version = "v1.4.0"
+
+[[projects]]
+  name = "github.com/onsi/gomega"
+  packages = [".","format","gbytes","gexec","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"]
+  revision = "c893efa28eb45626cdaa76c9f653b62488858837"
+  version = "v1.2.0"
+
+[[projects]]
+  name = "github.com/pelletier/go-toml"
+  packages = ["."]
+  revision = "16398bac157da96aa88f98a2df640c7f32af1da2"
+  version = "v1.0.1"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/petar/GoLLRB"
+  packages = ["llrb"]
+  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
+
+[[projects]]
+  name = "github.com/peterbourgon/diskv"
+  packages = ["."]
+  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
+  version = "v2.0.1"
+
+[[projects]]
+  name = "github.com/spf13/afero"
+  packages = [".","mem"]
+  revision = "8d919cbe7e2627e417f3e45c3c0e489a5b7e2536"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/spf13/cast"
+  packages = ["."]
+  revision = "acbeb36b902d72a7a4c18e8f3241075e7ab763e4"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/spf13/cobra"
+  packages = ["."]
+  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
+  version = "v0.0.1"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/spf13/jwalterweatherman"
+  packages = ["."]
+  revision = "12bd96e66386c1960ab0f74ced1362f66f552f7b"
+
 [[projects]]
   name = "github.com/spf13/pflag"
   packages = ["."]
   revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
   version = "v1.0.0"
 
+[[projects]]
+  name = "github.com/spf13/viper"
+  packages = ["."]
+  revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["ssh/terminal"]
+  revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["http2","http2/hpack","idna","lex/httplex"]
+  packages = ["html","html/atom","html/charset","http2","http2/hpack","idna","lex/httplex"]
   revision = "a337091b0525af65de94df2eb7e98bd9962dcbe2"
 
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix","windows"]
+  revision = "1006bb3484c92b19a5b6612452e038b554fadb9c"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/text"
-  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
+  packages = ["collate","collate/build","encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
   revision = "88f656faf3f37f690df1a32515b479415e1a6769"
 
 [[projects]]
@@ -103,12 +271,24 @@
   packages = ["."]
   revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
 
+[[projects]]
+  branch = "master"
+  name = "k8s.io/api"
+  packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
+  revision = "218912509d74a117d05a718bb926d0948e531c20"
+
 [[projects]]
   branch = "master"
   name = "k8s.io/apimachinery"
-  packages = ["pkg/api/equality","pkg/api/meta","pkg/api/resource","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/selection","pkg/types","pkg/util/diff","pkg/util/errors","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/watch","third_party/forked/golang/reflect"]
+  packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/reflect"]
   revision = "18a564baac720819100827c16fdebcadb05b2d0d"
 
+[[projects]]
+  name = "k8s.io/client-go"
+  packages = ["discovery","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta2","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1beta1","pkg/version","rest","rest/watch","tools/auth","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/reference","transport","util/cert","util/flowcontrol","util/homedir","util/integer"]
+  revision = "2ae454230481a7cb5544325e12ad7658ecccd19b"
+  version = "v5.0.1"
+
 [[projects]]
   branch = "master"
   name = "k8s.io/kube-openapi"
@@ -118,6 +298,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "132e4fbab5990bffdbf7ad26a6bd17d75e27af4c1bd689b9c22baddf130cbd14"
+  inputs-digest = "f97c722e792f642563d42011d9e2dff8cd05844996c03c124ceca6409cda38ce"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -19,7 +19,13 @@ go vet -all ./...
 rc=$((rc || $?))
 
 echo "Running go test"
-go test -v ./...
+go list ./... | grep -vF pkg/framework/test | xargs go test -v
+rc=$((rc || $?))
+
+echo "Running test framework tests"
+go get github.com/onsi/ginkgo/ginkgo \
+  && ./pkg/framework/test/scripts/download-binaries.sh \
+  && ./pkg/framework/test/scripts/run-tests.sh
 rc=$((rc || $?))
 
 exit $rc

@@ -0,0 +1 @@
assets/bin
@@ -0,0 +1,88 @@
package test

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
)

// APIServer knows how to run a kubernetes apiserver. Set it up with the path to a precompiled binary.
type APIServer struct {
	// The path to the apiserver binary
	Path    string
	EtcdURL string

	session        *gexec.Session
	stdOut         *gbytes.Buffer
	stdErr         *gbytes.Buffer
	certDirManager certDirManager
}

type certDirManager interface {
	Create() (string, error)
	Destroy() error
}

// Start starts the apiserver, waits for it to come up, and returns an error if one occurred.
func (s *APIServer) Start() error {
	s.certDirManager = NewTempDirManager()
	s.stdOut = gbytes.NewBuffer()
	s.stdErr = gbytes.NewBuffer()

	certDir, err := s.certDirManager.Create()
	if err != nil {
		return err
	}

	args := []string{
		"--authorization-mode=Node,RBAC",
		"--runtime-config=admissionregistration.k8s.io/v1alpha1",
		"--v=3", "--vmodule=",
		"--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,SecurityContextDeny,DefaultStorageClass,DefaultTolerationSeconds,GenericAdmissionWebhook,ResourceQuota",
		"--admission-control-config-file=",
		"--bind-address=0.0.0.0",
		"--insecure-bind-address=127.0.0.1",
		"--insecure-port=8080",
		"--storage-backend=etcd3",
		fmt.Sprintf("--etcd-servers=%s", s.EtcdURL),
		fmt.Sprintf("--cert-dir=%s", certDir),
	}

	detectedStart := s.stdErr.Detect("Serving insecurely on 127.0.0.1:8080")
	timedOut := time.After(20 * time.Second)

	command := exec.Command(s.Path, args...)
	s.session, err = gexec.Start(command, s.stdOut, s.stdErr)
	if err != nil {
		return err
	}

	select {
	case <-detectedStart:
		return nil
	case <-timedOut:
		return fmt.Errorf("timeout waiting for apiserver to start serving")
	}
}

// Stop stops this process gracefully, waits for its termination, and cleans up the cert directory.
func (s *APIServer) Stop() {
	if s.session != nil {
		s.session.Terminate().Wait(20 * time.Second)
		err := s.certDirManager.Destroy()
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}

// ExitCode returns the exit code of the process, if it has exited. If it hasn't exited yet, ExitCode returns -1.
func (s *APIServer) ExitCode() int {
	return s.session.ExitCode()
}

// Buffer implements the gbytes.BufferProvider interface and returns the stdout of the process.
func (s *APIServer) Buffer() *gbytes.Buffer {
	return s.session.Buffer()
}
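For context, a minimal sketch of how code outside this package might drive the APIServer fixture from a plain `go test`, rather than from Ginkgo as the specs below do. The binary path and etcd URL here are assumptions for illustration only; the real tests build a fake apiserver with gexec.Build.

package example_test

import (
	"testing"

	"k8s.io/kubectl/pkg/framework/test"
)

func TestAPIServerLifecycle(t *testing.T) {
	server := &test.APIServer{
		Path:    "/tmp/assets/bin/kube-apiserver", // assumed location of a downloaded binary
		EtcdURL: "http://127.0.0.1:2379",          // assumes an etcd is already listening here
	}
	if err := server.Start(); err != nil {
		t.Fatalf("apiserver did not start: %v", err)
	}
	defer server.Stop()

	// ExitCode reports -1 while the process is still running.
	if code := server.ExitCode(); code != -1 {
		t.Fatalf("expected apiserver to still be running, got exit code %d", code)
	}
}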
@@ -0,0 +1,62 @@
package test_test

import (
	. "k8s.io/kubectl/pkg/framework/test"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
)

var _ = Describe("Apiserver", func() {
	Context("when given a path to a binary that runs for a long time", func() {
		It("can start and stop that binary", func() {
			pathToFakeAPIServer, err := gexec.Build("k8s.io/kubectl/pkg/framework/test/assets/fakeapiserver")
			Expect(err).NotTo(HaveOccurred())
			apiServer := &APIServer{
				Path:    pathToFakeAPIServer,
				EtcdURL: "the etcd url",
			}

			By("Starting the API Server")
			err = apiServer.Start()
			Expect(err).NotTo(HaveOccurred())

			Eventually(apiServer).Should(gbytes.Say("Everything is fine"))
			Expect(apiServer).NotTo(gexec.Exit())

			By("Stopping the API Server")
			apiServer.Stop()
			Expect(apiServer).To(gexec.Exit(143))
		})
	})

	Context("when no path is given", func() {
		It("fails with a helpful error", func() {
			apiServer := &APIServer{}
			err := apiServer.Start()
			Expect(err).To(MatchError(ContainSubstring("no such file or directory")))
		})
	})

	Context("when given a path to a non-executable", func() {
		It("fails with a helpful error", func() {
			apiServer := &APIServer{
				Path:    "./apiserver.go",
				EtcdURL: "the etcd url",
			}
			err := apiServer.Start()
			Expect(err).To(MatchError(ContainSubstring("./apiserver.go: permission denied")))
		})
	})

	Context("when we try to stop a server that hasn't been started", func() {
		It("does not panic", func() {
			server := &APIServer{}
			server.Stop()
		})
	})
})
@@ -0,0 +1 @@
This directory will be the home of some binaries which are downloaded with `pkg/framework/test/scripts/download-binaries`.
@ -0,0 +1,44 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
expectedArgs := []*regexp.Regexp{
|
||||||
|
regexp.MustCompile("^--authorization-mode=Node,RBAC$"),
|
||||||
|
regexp.MustCompile("^--runtime-config=admissionregistration.k8s.io/v1alpha1$"),
|
||||||
|
regexp.MustCompile("^--v=3$"),
|
||||||
|
regexp.MustCompile("^--vmodule=$"),
|
||||||
|
regexp.MustCompile("^--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,SecurityContextDeny,DefaultStorageClass,DefaultTolerationSeconds,GenericAdmissionWebhook,ResourceQuota$"),
|
||||||
|
regexp.MustCompile("^--admission-control-config-file=$"),
|
||||||
|
regexp.MustCompile("^--bind-address=0.0.0.0$"),
|
||||||
|
regexp.MustCompile("^--insecure-bind-address=127.0.0.1$"),
|
||||||
|
regexp.MustCompile("^--insecure-port=8080$"),
|
||||||
|
regexp.MustCompile("^--storage-backend=etcd3$"),
|
||||||
|
regexp.MustCompile("^--etcd-servers=the etcd url$"),
|
||||||
|
regexp.MustCompile("^--cert-dir=.*"),
|
||||||
|
}
|
||||||
|
numExpectedArgs := len(expectedArgs)
|
||||||
|
numGivenArgs := len(os.Args) - 1
|
||||||
|
|
||||||
|
if numGivenArgs < numExpectedArgs {
|
||||||
|
fmt.Printf("Expected at least %d args, only got %d\n", numExpectedArgs, numGivenArgs)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, argRegexp := range expectedArgs {
|
||||||
|
givenArg := os.Args[i+1]
|
||||||
|
if !argRegexp.MatchString(givenArg) {
|
||||||
|
fmt.Printf("Expected arg '%s' to match '%s'\n", givenArg, argRegexp.String())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Println("Everything is fine")
|
||||||
|
fmt.Fprintln(os.Stderr, "Serving insecurely on 127.0.0.1:8080")
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Minute)
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,39 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
expectedArgs := []*regexp.Regexp{
|
||||||
|
regexp.MustCompile("^--debug$"),
|
||||||
|
regexp.MustCompile("^--advertise-client-urls$"),
|
||||||
|
regexp.MustCompile("^our etcd url$"),
|
||||||
|
regexp.MustCompile("^--listen-client-urls$"),
|
||||||
|
regexp.MustCompile("^our etcd url$"),
|
||||||
|
regexp.MustCompile("^--data-dir$"),
|
||||||
|
regexp.MustCompile("^.+"),
|
||||||
|
}
|
||||||
|
numExpectedArgs := len(expectedArgs)
|
||||||
|
numGivenArgs := len(os.Args) - 1
|
||||||
|
|
||||||
|
if numGivenArgs < numExpectedArgs {
|
||||||
|
fmt.Printf("Expected at least %d args, only got %d\n", numExpectedArgs, numGivenArgs)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, argRegexp := range expectedArgs {
|
||||||
|
givenArg := os.Args[i+1]
|
||||||
|
if !argRegexp.MatchString(givenArg) {
|
||||||
|
fmt.Printf("Expected arg '%s' to match '%s'\n", givenArg, argRegexp.String())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Println("Everything is dandy")
|
||||||
|
fmt.Fprintln(os.Stderr, "serving insecure client requests on 127.0.0.1:2379")
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Minute)
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,75 @@
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
- name: test-dev-branch
|
||||||
|
public: true
|
||||||
|
serial: true
|
||||||
|
plan:
|
||||||
|
- get: git-kubectl-dev
|
||||||
|
trigger: true
|
||||||
|
- task: run-tests
|
||||||
|
config:
|
||||||
|
platform: linux
|
||||||
|
image_resource:
|
||||||
|
type: docker-image
|
||||||
|
source:
|
||||||
|
repository: golang
|
||||||
|
tag: 1.9
|
||||||
|
inputs:
|
||||||
|
- name: git-kubectl-dev
|
||||||
|
path: go/src/k8s.io/kubectl
|
||||||
|
run:
|
||||||
|
path: /bin/bash
|
||||||
|
args:
|
||||||
|
- -c
|
||||||
|
- |
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -eux
|
||||||
|
chown -R nobody:nogroup "${PWD}/go"
|
||||||
|
|
||||||
|
cat <<'EOS' | su -c bash -s /bin/bash nobody
|
||||||
|
set -eux
|
||||||
|
export GOPATH="${PWD}/go"
|
||||||
|
export PATH="${PATH}:/usr/local/go/bin:${GOPATH}/bin"
|
||||||
|
go get github.com/onsi/ginkgo/ginkgo
|
||||||
|
"${GOPATH}/src/k8s.io/kubectl/pkg/framework/test/scripts/download-binaries.sh"
|
||||||
|
GINKGO_PERFORMANCE=1 "${GOPATH}/src/k8s.io/kubectl/pkg/framework/test/scripts/run-tests.sh"
|
||||||
|
EOS
|
||||||
|
- name: push-to-prod-branch
|
||||||
|
serial: true
|
||||||
|
plan:
|
||||||
|
- get: git-kubectl-dev
|
||||||
|
trigger: true
|
||||||
|
passed:
|
||||||
|
- test-dev-branch
|
||||||
|
- put: git-kubectl-pair2
|
||||||
|
params:
|
||||||
|
repository: git-kubectl-dev
|
||||||
|
force: true
|
||||||
|
- put: git-kubectl-pair1
|
||||||
|
params:
|
||||||
|
repository: git-kubectl-dev
|
||||||
|
force: true
|
||||||
|
|
||||||
|
|
||||||
|
resources:
|
||||||
|
- name: git-kubectl-dev
|
||||||
|
type: git
|
||||||
|
source:
|
||||||
|
uri: {{git-dev-url}} # git@github.com:totherme/kubectl
|
||||||
|
branch: test-framework-dev
|
||||||
|
private_key: {{git-dev-private-key}}
|
||||||
|
ignore_paths: [pkg/framework/test/ci]
|
||||||
|
|
||||||
|
- name: git-kubectl-pair1
|
||||||
|
type: git
|
||||||
|
source:
|
||||||
|
uri: {{git-pair1-url}} #git@github.com:totherme/kubectl
|
||||||
|
branch: test-framework
|
||||||
|
private_key: {{git-pair1-private-key}}
|
||||||
|
|
||||||
|
- name: git-kubectl-pair2
|
||||||
|
type: git
|
||||||
|
source:
|
||||||
|
uri: {{git-pair2-url}} #git@github.com:hoegaarden/kubectl
|
||||||
|
branch: test-framework
|
||||||
|
private_key: {{git-pair2-private-key}}
|
||||||
|
|
@ -0,0 +1,38 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
# Use DEBUG=1 ./set-pipeline.sh to get debug output
|
||||||
|
[[ -z "${DEBUG:-""}" ]] || set -x
|
||||||
|
|
||||||
|
# Use CONCOURSE_TARGET=my-concourse ./set-pipeline.sh to connect to your local concourse
|
||||||
|
: "${CONCOURSE_TARGET:="wings"}"
|
||||||
|
# Use PIPELINE_NAME=my-name ./set-pipeline.sh to give your pipeline a different name
|
||||||
|
: "${PIPELINE_NAME:="kubectl"}"
|
||||||
|
|
||||||
|
# Use PAIR1_LASTPASS=my-lastpass-key ./set-pipeline.sh to get your github keys and URL from your lastpass entry
|
||||||
|
: "${PAIR1_LASTPASS:="oss-k8s-github-gds-keypair"}"
|
||||||
|
: "${PAIR2_LASTPASS:="oss-k8s-github-hhorl-keypair"}"
|
||||||
|
|
||||||
|
github_pair1_key="$(lpass show "${PAIR1_LASTPASS}" --field "Private Key")"
|
||||||
|
github_pair2_key="$(lpass show "${PAIR2_LASTPASS}" --field "Private Key")"
|
||||||
|
github_pair1_url="$(lpass show "${PAIR1_LASTPASS}" --notes)"
|
||||||
|
github_pair2_url="$(lpass show "${PAIR2_LASTPASS}" --notes)"
|
||||||
|
|
||||||
|
script_dir="$(cd "$(dirname "$0")" ; pwd)"
|
||||||
|
|
||||||
|
# Create/Update the pipeline
|
||||||
|
fly set-pipeline \
|
||||||
|
--target="${CONCOURSE_TARGET}" \
|
||||||
|
--pipeline="${PIPELINE_NAME}" \
|
||||||
|
--config="${script_dir}/pipeline.yml" \
|
||||||
|
--var=git-dev-url="${github_pair1_url}" \
|
||||||
|
--var=git-pair1-url="${github_pair1_url}" \
|
||||||
|
--var=git-pair2-url="${github_pair2_url}" \
|
||||||
|
--var=git-dev-private-key="${github_pair1_key}" \
|
||||||
|
--var=git-pair1-private-key="${github_pair1_key}" \
|
||||||
|
--var=git-pair2-private-key="${github_pair2_key}"
|
||||||
|
|
||||||
|
# Make the pipeline publicly available
|
||||||
|
fly expose-pipeline \
|
||||||
|
--target="${CONCOURSE_TARGET}" \
|
||||||
|
--pipeline="${PIPELINE_NAME}"
|
||||||
|
|
@ -0,0 +1,65 @@
|
||||||
|
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// listPodsCmd represents the listPods command
|
||||||
|
var listPodsCmd = &cobra.Command{
|
||||||
|
Use: "listPods",
|
||||||
|
Short: "List all pods",
|
||||||
|
Long: `Give a list of all pods known by the system`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
runGetPods()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func runGetPods() {
|
||||||
|
config, _ := clientcmd.BuildConfigFromFlags("http://localhost:8080", "")
|
||||||
|
|
||||||
|
// create the clientset
|
||||||
|
clientset, _ := kubernetes.NewForConfig(config)
|
||||||
|
|
||||||
|
pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if len(pods.Items) > 0 {
|
||||||
|
} else {
|
||||||
|
fmt.Println("There are no pods.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
RootCmd.AddCommand(listPodsCmd)
|
||||||
|
|
||||||
|
// Here you will define your flags and configuration settings.
|
||||||
|
|
||||||
|
// Cobra supports Persistent Flags which will work for this command
|
||||||
|
// and all subcommands, e.g.:
|
||||||
|
// listPodsCmd.PersistentFlags().String("foo", "", "A help for foo")
|
||||||
|
|
||||||
|
// Cobra supports local flags which will only run when this command
|
||||||
|
// is called directly, e.g.:
|
||||||
|
// listPodsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,86 @@
|
||||||
|
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
homedir "github.com/mitchellh/go-homedir"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
)
|
||||||
|
|
||||||
|
var cfgFile string
|
||||||
|
|
||||||
|
// RootCmd represents the base command when called without any subcommands
|
||||||
|
var RootCmd = &cobra.Command{
|
||||||
|
Use: "democli",
|
||||||
|
Short: "A demo CLI application",
|
||||||
|
Long: `This is a demo kubernetes CLI, which interacts with the kubernetes API.
|
||||||
|
|
||||||
|
The purpose of this CLI is to demo the testing framework that was used to develop it.`,
|
||||||
|
// Uncomment the following line if your bare application
|
||||||
|
// has an action associated with it:
|
||||||
|
// Run: func(cmd *cobra.Command, args []string) { },
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||||
|
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||||
|
func Execute() {
|
||||||
|
if err := RootCmd.Execute(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
cobra.OnInitialize(initConfig)
|
||||||
|
|
||||||
|
// Here you will define your flags and configuration settings.
|
||||||
|
// Cobra supports persistent flags, which, if defined here,
|
||||||
|
// will be global for your application.
|
||||||
|
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.democli.yaml)")
|
||||||
|
|
||||||
|
// Cobra also supports local flags, which will only run
|
||||||
|
// when this action is called directly.
|
||||||
|
RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||||
|
}
|
||||||
|
|
||||||
|
// initConfig reads in config file and ENV variables if set.
|
||||||
|
func initConfig() {
|
||||||
|
if cfgFile != "" {
|
||||||
|
// Use config file from the flag.
|
||||||
|
viper.SetConfigFile(cfgFile)
|
||||||
|
} else {
|
||||||
|
// Find home directory.
|
||||||
|
home, err := homedir.Dir()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search config in home directory with name ".democli" (without extension).
|
||||||
|
viper.AddConfigPath(home)
|
||||||
|
viper.SetConfigName(".democli")
|
||||||
|
}
|
||||||
|
|
||||||
|
viper.AutomaticEnv() // read in environment variables that match
|
||||||
|
|
||||||
|
// If a config file is found, read it in.
|
||||||
|
if err := viper.ReadInConfig(); err == nil {
|
||||||
|
fmt.Println("Using config file:", viper.ConfigFileUsed())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,42 @@
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega/gexec"
|
||||||
|
"k8s.io/kubectl/pkg/framework/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIntegration(t *testing.T) {
|
||||||
|
RegisterFailHandler(Fail)
|
||||||
|
RunSpecs(t, "Integration Suite")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
pathToDemoCommand string
|
||||||
|
fixtures *test.Fixtures
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = BeforeSuite(func() {
|
||||||
|
var err error
|
||||||
|
pathToDemoCommand, err = gexec.Build("k8s.io/kubectl/pkg/framework/test/democli/")
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
assetsDir, ok := os.LookupEnv("KUBE_ASSETS_DIR")
|
||||||
|
Expect(ok).To(BeTrue(), "KUBE_ASSETS_DIR should point to a directory containing etcd and apiserver binaries")
|
||||||
|
fixtures = test.NewFixtures(filepath.Join(assetsDir, "etcd"), filepath.Join(assetsDir, "kube-apiserver"))
|
||||||
|
err = fixtures.Start()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
})
|
||||||
|
|
||||||
|
var _ = AfterSuite(func() {
|
||||||
|
fixtures.Stop()
|
||||||
|
gexec.CleanupBuildArtifacts()
|
||||||
|
})
|
||||||
|
|
@ -0,0 +1,30 @@
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
"github.com/onsi/gomega/gbytes"
|
||||||
|
"github.com/onsi/gomega/gexec"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("Integration", func() {
|
||||||
|
It("can give us a helpful help message", func() {
|
||||||
|
helpfulMessage := `This is a demo kubernetes CLI, which interacts with the kubernetes API.`
|
||||||
|
|
||||||
|
command := exec.Command(pathToDemoCommand, "--help")
|
||||||
|
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Eventually(session).Should(gexec.Exit(0))
|
||||||
|
Expect(session.Out).To(gbytes.Say(helpfulMessage))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("can get a list of pods", func() {
|
||||||
|
command := exec.Command(pathToDemoCommand, "listPods")
|
||||||
|
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Eventually(session).Should(gexec.Exit(0))
|
||||||
|
Expect(session.Out).To(gbytes.Say("There are no pods."))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
@ -0,0 +1,21 @@
|
||||||
|
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "k8s.io/kubectl/pkg/framework/test/democli/cmd"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cmd.Execute()
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,84 @@
package test

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
)

// Etcd knows how to run an etcd server. Set it up with the path to a precompiled binary.
type Etcd struct {
	// The path to the etcd binary
	Path    string
	EtcdURL string

	session        *gexec.Session
	stdOut         *gbytes.Buffer
	stdErr         *gbytes.Buffer
	dataDirManager dataDirManager
}

type dataDirManager interface {
	Create() (string, error)
	Destroy() error
}

// Start starts the etcd, waits for it to come up, and returns an error if one occurred.
func (e *Etcd) Start() error {
	e.dataDirManager = NewTempDirManager()
	e.stdOut = gbytes.NewBuffer()
	e.stdErr = gbytes.NewBuffer()

	dataDir, err := e.dataDirManager.Create()
	if err != nil {
		return err
	}

	args := []string{
		"--debug",
		"--advertise-client-urls",
		e.EtcdURL,
		"--listen-client-urls",
		e.EtcdURL,
		"--data-dir",
		dataDir,
	}

	detectedStart := e.stdErr.Detect("serving insecure client requests on 127.0.0.1:2379")
	timedOut := time.After(20 * time.Second)

	command := exec.Command(e.Path, args...)
	e.session, err = gexec.Start(command, e.stdOut, e.stdErr)
	if err != nil {
		return err
	}

	select {
	case <-detectedStart:
		return nil
	case <-timedOut:
		return fmt.Errorf("timeout waiting for etcd to start serving")
	}
}

// Stop stops this process gracefully, waits for its termination, and cleans up the data directory.
func (e *Etcd) Stop() {
	if e.session != nil {
		e.session.Terminate().Wait(20 * time.Second)
		err := e.dataDirManager.Destroy()
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}

// ExitCode returns the exit code of the process, if it has exited. If it hasn't exited yet, ExitCode returns -1.
func (e *Etcd) ExitCode() int {
	return e.session.ExitCode()
}

// Buffer implements the gbytes.BufferProvider interface and returns the stdout of the process.
func (e *Etcd) Buffer() *gbytes.Buffer {
	return e.session.Buffer()
}
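Both Start methods above share one pattern worth noting: gbytes.Buffer.Detect returns a channel that fires once the buffered output matches a pattern, and selecting on that channel against time.After bounds the wait for a readiness log line. A stripped-down sketch of the same pattern, with the helper name and its callers invented here purely for illustration:

package example

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
)

// waitForLogLine starts cmd and waits until its stderr matches pattern, or
// gives up after timeout - the same readiness check Etcd.Start and
// APIServer.Start perform.
func waitForLogLine(cmd *exec.Cmd, pattern string, timeout time.Duration) (*gexec.Session, error) {
	stdErr := gbytes.NewBuffer()
	detected := stdErr.Detect(pattern) // fires when stderr matches the pattern

	session, err := gexec.Start(cmd, gbytes.NewBuffer(), stdErr)
	if err != nil {
		return nil, err
	}

	select {
	case <-detected:
		return session, nil
	case <-time.After(timeout):
		session.Terminate()
		return nil, fmt.Errorf("timeout waiting for %q", pattern)
	}
}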
@ -0,0 +1,61 @@
|
||||||
|
package test_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
. "k8s.io/kubectl/pkg/framework/test"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
"github.com/onsi/gomega/gbytes"
|
||||||
|
"github.com/onsi/gomega/gexec"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("Etcd", func() {
|
||||||
|
|
||||||
|
Context("when given a path to a binary that runs for a long time", func() {
|
||||||
|
It("can start and stop that binary", func() {
|
||||||
|
pathToFakeEtcd, err := gexec.Build("k8s.io/kubectl/pkg/framework/test/assets/fakeetcd")
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
etcd := &Etcd{
|
||||||
|
Path: pathToFakeEtcd,
|
||||||
|
EtcdURL: "our etcd url",
|
||||||
|
}
|
||||||
|
|
||||||
|
By("Starting the Etcd Server")
|
||||||
|
err = etcd.Start()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
Eventually(etcd).Should(gbytes.Say("Everything is dandy"))
|
||||||
|
Expect(etcd).NotTo(gexec.Exit())
|
||||||
|
|
||||||
|
By("Stopping the Etcd Server")
|
||||||
|
etcd.Stop()
|
||||||
|
Expect(etcd).To(gexec.Exit(143))
|
||||||
|
})
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when no path is given", func() {
|
||||||
|
It("fails with a helpful error", func() {
|
||||||
|
etcd := &Etcd{}
|
||||||
|
err := etcd.Start()
|
||||||
|
Expect(err).To(MatchError(ContainSubstring("no such file or directory")))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when given a path to a non-executable", func() {
|
||||||
|
It("fails with a helpful error", func() {
|
||||||
|
etcd := &Etcd{
|
||||||
|
Path: "./etcd.go",
|
||||||
|
}
|
||||||
|
err := etcd.Start()
|
||||||
|
Expect(err).To(MatchError(ContainSubstring("./etcd.go: permission denied")))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when we try to stop a server that hasn't been started", func() {
|
||||||
|
It("does not panic", func() {
|
||||||
|
etcd := &Etcd{}
|
||||||
|
etcd.Stop()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
@@ -0,0 +1,65 @@
package test

// Fixtures is a struct that knows how to start all your test fixtures.
//
// Right now, that means Etcd and your APIServer. This is likely to increase in future.
type Fixtures struct {
	Etcd      FixtureProcess
	APIServer FixtureProcess
}

// FixtureProcess knows how to start and stop a fixture process.
// This interface is potentially going to be expanded to e.g. allow access to the process's StdOut/StdErr
// and other internals.
type FixtureProcess interface {
	Start() error
	Stop()
}

//go:generate counterfeiter . FixtureProcess

// NewFixtures will give you a Fixtures struct that's properly wired together.
func NewFixtures(pathToEtcd, pathToAPIServer string) *Fixtures {
	etcdURL := "http://127.0.0.1:2379"
	return &Fixtures{
		Etcd: &Etcd{
			Path:    pathToEtcd,
			EtcdURL: etcdURL,
		},
		APIServer: &APIServer{
			Path:    pathToAPIServer,
			EtcdURL: etcdURL,
		},
	}
}

// Start will start all your fixtures. To stop them, call Stop().
func (f *Fixtures) Start() error {
	started := make(chan error)
	starter := func(process FixtureProcess) {
		started <- process.Start()
	}
	processes := []FixtureProcess{
		f.Etcd,
		f.APIServer,
	}

	for _, process := range processes {
		go starter(process)
	}

	for pendingProcesses := len(processes); pendingProcesses > 0; pendingProcesses-- {
		if err := <-started; err != nil {
			return err
		}
	}

	return nil
}

// Stop will stop all your fixtures, and clean up their data.
func (f *Fixtures) Stop() error {
	f.APIServer.Stop()
	f.Etcd.Stop()
	return nil
}
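A minimal sketch of how a suite might consume Fixtures from a plain TestMain (the Ginkgo suites in this commit do the equivalent in BeforeSuite/AfterSuite). It assumes KUBE_ASSETS_DIR points at a directory containing etcd and kube-apiserver binaries, as the download script in this change arranges:

package example_test

import (
	"log"
	"os"
	"path/filepath"
	"testing"

	"k8s.io/kubectl/pkg/framework/test"
)

var fixtures *test.Fixtures

func TestMain(m *testing.M) {
	assets := os.Getenv("KUBE_ASSETS_DIR") // assumed to hold etcd and kube-apiserver
	fixtures = test.NewFixtures(
		filepath.Join(assets, "etcd"),
		filepath.Join(assets, "kube-apiserver"),
	)
	if err := fixtures.Start(); err != nil {
		log.Fatalf("could not start fixtures: %v", err)
	}

	code := m.Run()

	fixtures.Stop() // terminates both processes and cleans up their temp dirs
	os.Exit(code)
}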
@ -0,0 +1,71 @@
|
||||||
|
package test_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
. "k8s.io/kubectl/pkg/framework/test"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
"k8s.io/kubectl/pkg/framework/test/testfakes"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("Fixtures", func() {
|
||||||
|
It("can construct a properly wired Fixtures struct", func() {
|
||||||
|
f := NewFixtures("path to etcd", "path to apiserver")
|
||||||
|
Expect(f.Etcd.(*Etcd).Path).To(Equal("path to etcd"))
|
||||||
|
Expect(f.APIServer.(*APIServer).Path).To(Equal("path to apiserver"))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("with a properly configured set of Fixtures", func() {
|
||||||
|
var (
|
||||||
|
fakeEtcdProcess *testfakes.FakeFixtureProcess
|
||||||
|
fakeAPIServerProcess *testfakes.FakeFixtureProcess
|
||||||
|
fixtures Fixtures
|
||||||
|
)
|
||||||
|
BeforeEach(func() {
|
||||||
|
fakeEtcdProcess = &testfakes.FakeFixtureProcess{}
|
||||||
|
fakeAPIServerProcess = &testfakes.FakeFixtureProcess{}
|
||||||
|
fixtures = Fixtures{
|
||||||
|
Etcd: fakeEtcdProcess,
|
||||||
|
APIServer: fakeAPIServerProcess,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
It("can start them", func() {
|
||||||
|
err := fixtures.Start()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
By("starting Etcd")
|
||||||
|
Expect(fakeEtcdProcess.StartCallCount()).To(Equal(1),
|
||||||
|
"the EtcdStartStopper should be called exactly once")
|
||||||
|
|
||||||
|
By("starting APIServer")
|
||||||
|
Expect(fakeAPIServerProcess.StartCallCount()).To(Equal(1),
|
||||||
|
"the APIServerStartStopper should be called exactly once")
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when starting etcd fails", func() {
|
||||||
|
It("wraps the error", func() {
|
||||||
|
fakeEtcdProcess.StartReturns(fmt.Errorf("some error"))
|
||||||
|
err := fixtures.Start()
|
||||||
|
Expect(err).To(MatchError(ContainSubstring("some error")))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when starting APIServer fails", func() {
|
||||||
|
It("wraps the error", func() {
|
||||||
|
fakeAPIServerProcess.StartReturns(fmt.Errorf("another error"))
|
||||||
|
err := fixtures.Start()
|
||||||
|
Expect(err).To(MatchError(ContainSubstring("another error")))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
It("can can clean up the temporary directory and stop", func() {
|
||||||
|
fixtures.Stop()
|
||||||
|
Expect(fakeEtcdProcess.StopCallCount()).To(Equal(1))
|
||||||
|
Expect(fakeAPIServerProcess.StopCallCount()).To(Equal(1))
|
||||||
|
})
|
||||||
|
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
@@ -0,0 +1,35 @@
package integration_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"

	"os"
	"path/filepath"

	"github.com/onsi/gomega/gexec"
)

func TestIntegration(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Integration Suite")
}

var (
	defaultPathToEtcd      string
	defaultPathToApiserver string
)

var _ = BeforeSuite(func() {
	assetsDir, ok := os.LookupEnv("KUBE_ASSETS_DIR")
	Expect(ok).To(BeTrue(), "Expected $KUBE_ASSETS_DIR to be set")

	defaultPathToEtcd = filepath.Join(assetsDir, "etcd")
	defaultPathToApiserver = filepath.Join(assetsDir, "kube-apiserver")
})

var _ = AfterSuite(func() {
	gexec.TerminateAndWait()
})
@@ -0,0 +1,58 @@
package integration_test

import (
	"fmt"
	"net"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/kubectl/pkg/framework/test"
)

var _ = Describe("Integration", func() {
	It("Successfully manages the fixtures lifecycle", func() {
		fixtures := test.NewFixtures(defaultPathToEtcd, defaultPathToApiserver)

		err := fixtures.Start()
		Expect(err).NotTo(HaveOccurred(), "Expected fixtures to start successfully")

		isEtcdListening := isSomethingListeningOnPort(2379)
		isAPIServerListening := isSomethingListeningOnPort(8080)

		Expect(isEtcdListening()).To(BeTrue(), "Expected Etcd to listen on 2379")

		Expect(isAPIServerListening()).To(BeTrue(), "Expected APIServer to listen on 8080")

		err = fixtures.Stop()
		Expect(err).NotTo(HaveOccurred(), "Expected fixtures to stop successfully")

		Expect(isEtcdListening()).To(BeFalse(), "Expected Etcd not to listen anymore")

		By("Ensuring APIServer is not listening anymore")
		Expect(isAPIServerListening()).To(BeFalse(), "Expected APIServer not to listen anymore")
	})

	Measure("It should be fast to bring up and tear down the fixtures", func(b Benchmarker) {
		b.Time("lifecycle", func() {
			fixtures := test.NewFixtures(defaultPathToEtcd, defaultPathToApiserver)

			fixtures.Start()
			fixtures.Stop()
		})
	}, 10)
})

type portChecker func() bool

func isSomethingListeningOnPort(port int) portChecker {
	return func() bool {
		conn, err := net.DialTimeout("tcp", net.JoinHostPort("", fmt.Sprintf("%d", port)), 1*time.Second)
		if err != nil {
			return false
		}
		conn.Close()
		return true
	}
}
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -eu

# Use DEBUG=1 ./scripts/download-binaries.sh to get debug output
quiet="--quiet"
[[ -z "${DEBUG:-""}" ]] || {
  set -x
  quiet=""
}

# Use BASE_URL=https://my/binaries/url ./scripts/download-binaries to download
# from a different bucket
: "${BASE_URL:="https://storage.googleapis.com/k8s-c10s-test-binaries"}"

test_framework_dir="$(cd "$(dirname "$0")/.." ; pwd)"
os="$(uname -s)"
arch="$(uname -m)"

echo "About to download a couple of binaries. This might take a while..."
wget $quiet "${BASE_URL}/etcd-${os}-${arch}" -O "${test_framework_dir}/assets/bin/etcd"
wget $quiet "${BASE_URL}/kube-apiserver-${os}-${arch}" -O "${test_framework_dir}/assets/bin/kube-apiserver"
chmod +x "${test_framework_dir}/assets/bin/etcd"
chmod +x "${test_framework_dir}/assets/bin/kube-apiserver"
echo "Done!"
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -eu

# Use DEBUG=1 ./scripts/run-tests.sh to get debug output
[[ -z "${DEBUG:-""}" ]] || set -x

GINKGO="ginkgo"
if [[ -n "${GINKGO_WATCH:-""}" ]] ; then
  GINKGO="$GINKGO watch"
fi

if [[ -z ${GINKGO_PERFORMANCE:-""} ]] ; then
  GINKGO="$GINKGO -skipMeasurements"
fi

test_framework_dir="$(cd "$(dirname "$0")/.." ; pwd)"

export KUBE_ASSETS_DIR="${test_framework_dir}/assets/bin"

$GINKGO -r "${test_framework_dir}"
@@ -0,0 +1,49 @@
package test

import (
	"io/ioutil"
	"os"
)

// TempDirMaker can create directories.
type TempDirMaker func(dir, prefix string) (name string, err error)

// TempDirRemover can delete directories.
type TempDirRemover func(dir string) error

// NewTempDirManager returns a new manager for creation and deletion of temporary directories.
func NewTempDirManager() *TempDirManager {
	return &TempDirManager{
		Maker:   ioutil.TempDir,
		Remover: os.RemoveAll,
	}
}

// TempDirManager knows when to call the directory maker and remover and keeps track of created directories.
type TempDirManager struct {
	Maker   TempDirMaker
	Remover TempDirRemover
	dir     string
}

// Create knows how to create a temporary directory and how to keep track of it.
func (t *TempDirManager) Create() (string, error) {
	if t.dir == "" {
		dir, err := t.Maker("", "kube-test-framework-")
		if err != nil {
			return "", err
		}
		t.dir = dir
	}
	return t.dir, nil
}

// Destroy knows how to destroy a previously created directory.
func (t *TempDirManager) Destroy() error {
	if t.dir != "" {
		err := t.Remover(t.dir)
		t.dir = ""
		return err
	}
	return nil
}
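The exported Maker and Remover fields are the seam that makes TempDirManager testable: production code keeps ioutil.TempDir and os.RemoveAll, while a test can substitute fakes, much as the specs below do. A short sketch of that substitution; the counting helper is invented here for illustration and is not part of this commit:

package example

import "k8s.io/kubectl/pkg/framework/test"

// newCountingTempDirManager returns a TempDirManager whose filesystem calls
// are replaced with fakes, so a test can assert how many directories are
// currently "live" without touching the real filesystem.
func newCountingTempDirManager(live *int) *test.TempDirManager {
	m := test.NewTempDirManager()
	m.Maker = func(dir, prefix string) (string, error) {
		*live++
		return "/fake/" + prefix + "dir", nil // no real directory is created
	}
	m.Remover = func(dir string) error {
		*live--
		return nil
	}
	return m
}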
@ -0,0 +1,111 @@
|
||||||
|
package test_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
. "k8s.io/kubectl/pkg/framework/test"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("TempDirManager", func() {
|
||||||
|
var (
|
||||||
|
manager *TempDirManager
|
||||||
|
removerError error
|
||||||
|
createError error
|
||||||
|
managedDirCount int
|
||||||
|
separateDirCounter int
|
||||||
|
)
|
||||||
|
BeforeEach(func() {
|
||||||
|
managedDirCount = 0
|
||||||
|
separateDirCounter = 0
|
||||||
|
createError = nil
|
||||||
|
removerError = nil
|
||||||
|
manager = NewTempDirManager()
|
||||||
|
manager.Maker = func(dir, prefix string) (string, error) {
|
||||||
|
managedDirCount += 1
|
||||||
|
separateDirCounter += 1
|
||||||
|
return fmt.Sprintf("%d-%s-%s", separateDirCounter, dir, prefix), createError
|
||||||
|
}
|
||||||
|
manager.Remover = func(dir string) error {
|
||||||
|
managedDirCount -= 1
|
||||||
|
return removerError
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
It("can creates and remove directories", func() {
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
manager.Create()
|
||||||
|
Expect(managedDirCount).To(Equal(1))
|
||||||
|
manager.Destroy()
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when I call Create() multiple times on the same manager", func() {
|
||||||
|
It("returns the same directory every time", func() {
|
||||||
|
var dir1, dir2 string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
|
||||||
|
dir1, err = manager.Create()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(managedDirCount).To(Equal(1))
|
||||||
|
|
||||||
|
dir2, err = manager.Create()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(managedDirCount).To(Equal(1))
|
||||||
|
Expect(dir1).To(Equal(dir2))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("deletes the managed directory as soon as Destroy() is called even once", func() {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
|
||||||
|
_, err = manager.Create()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
_, err = manager.Create()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(managedDirCount).To(Equal(1))
|
||||||
|
|
||||||
|
manager.Destroy()
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when I call Destroy() without calling create first", func() {
|
||||||
|
It("does nothing", func() {
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
manager.Destroy()
|
||||||
|
Expect(managedDirCount).To(Equal(0))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when the remover returns an error", func() {
|
||||||
|
JustBeforeEach(func() {
|
||||||
|
removerError = fmt.Errorf("Error on removing dir")
|
||||||
|
})
|
||||||
|
It("handles that error depending on whether Create() has been called", func() {
|
||||||
|
By("avoiding the error if Create() has not been called")
|
||||||
|
err := manager.Destroy()
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
By("propagating the error if Create() has been called")
|
||||||
|
manager.Create()
|
||||||
|
err = manager.Destroy()
|
||||||
|
Expect(err).To(MatchError("Error on removing dir"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("when the creater returns an error", func() {
|
||||||
|
JustBeforeEach(func() {
|
||||||
|
createError = fmt.Errorf("Error on creating dir")
|
||||||
|
})
|
||||||
|
It("bubbles up the error", func() {
|
||||||
|
_, err := manager.Create()
|
||||||
|
Expect(err).To(MatchError("Error on creating dir"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
@@ -0,0 +1,13 @@
package test_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

func TestTest(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Test Suite")
}
@@ -0,0 +1,109 @@
// Code generated by counterfeiter. DO NOT EDIT.
package testfakes

import (
	"sync"

	"k8s.io/kubectl/pkg/framework/test"
)

type FakeFixtureProcess struct {
	StartStub        func() error
	startMutex       sync.RWMutex
	startArgsForCall []struct{}
	startReturns     struct {
		result1 error
	}
	startReturnsOnCall map[int]struct {
		result1 error
	}
	StopStub         func()
	stopMutex        sync.RWMutex
	stopArgsForCall  []struct{}
	invocations      map[string][][]interface{}
	invocationsMutex sync.RWMutex
}

func (fake *FakeFixtureProcess) Start() error {
	fake.startMutex.Lock()
	ret, specificReturn := fake.startReturnsOnCall[len(fake.startArgsForCall)]
	fake.startArgsForCall = append(fake.startArgsForCall, struct{}{})
	fake.recordInvocation("Start", []interface{}{})
	fake.startMutex.Unlock()
	if fake.StartStub != nil {
		return fake.StartStub()
	}
	if specificReturn {
		return ret.result1
	}
	return fake.startReturns.result1
}

func (fake *FakeFixtureProcess) StartCallCount() int {
	fake.startMutex.RLock()
	defer fake.startMutex.RUnlock()
	return len(fake.startArgsForCall)
}

func (fake *FakeFixtureProcess) StartReturns(result1 error) {
	fake.StartStub = nil
	fake.startReturns = struct {
		result1 error
	}{result1}
}

func (fake *FakeFixtureProcess) StartReturnsOnCall(i int, result1 error) {
	fake.StartStub = nil
	if fake.startReturnsOnCall == nil {
		fake.startReturnsOnCall = make(map[int]struct {
			result1 error
		})
	}
	fake.startReturnsOnCall[i] = struct {
		result1 error
	}{result1}
}

func (fake *FakeFixtureProcess) Stop() {
	fake.stopMutex.Lock()
	fake.stopArgsForCall = append(fake.stopArgsForCall, struct{}{})
	fake.recordInvocation("Stop", []interface{}{})
	fake.stopMutex.Unlock()
	if fake.StopStub != nil {
		fake.StopStub()
	}
}

func (fake *FakeFixtureProcess) StopCallCount() int {
	fake.stopMutex.RLock()
	defer fake.stopMutex.RUnlock()
	return len(fake.stopArgsForCall)
}

func (fake *FakeFixtureProcess) Invocations() map[string][][]interface{} {
	fake.invocationsMutex.RLock()
	defer fake.invocationsMutex.RUnlock()
	fake.startMutex.RLock()
	defer fake.startMutex.RUnlock()
	fake.stopMutex.RLock()
	defer fake.stopMutex.RUnlock()
	copiedInvocations := map[string][][]interface{}{}
	for key, value := range fake.invocations {
		copiedInvocations[key] = value
	}
	return copiedInvocations
}

func (fake *FakeFixtureProcess) recordInvocation(key string, args []interface{}) {
	fake.invocationsMutex.Lock()
	defer fake.invocationsMutex.Unlock()
	if fake.invocations == nil {
		fake.invocations = map[string][][]interface{}{}
	}
	if fake.invocations[key] == nil {
		fake.invocations[key] = [][]interface{}{}
	}
	fake.invocations[key] = append(fake.invocations[key], args)
}

var _ test.FixtureProcess = new(FakeFixtureProcess)
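A hypothetical sketch of driving this fake from a spec. It assumes the fake is importable as `k8s.io/kubectl/pkg/framework/test/testfakes` (the usual counterfeiter layout) and uses only the generated methods above together with Gomega; the error message is invented:

	fake := &testfakes.FakeFixtureProcess{}
	fake.StartReturns(nil)                         // default result for Start()
	fake.StartReturnsOnCall(1, errors.New("boom")) // make the second Start() fail

	Expect(fake.Start()).To(Succeed())
	Expect(fake.Start()).To(MatchError("boom"))
	fake.Stop()

	Expect(fake.StartCallCount()).To(Equal(2))
	Expect(fake.StopCallCount()).To(Equal(1))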
@@ -0,0 +1,4 @@
language: go

go:
  - 1.x
@@ -0,0 +1,46 @@
Change history of swagger
=
2017-01-30
- moved from go-restful/swagger to go-restful-swagger12

2015-10-16
- add type override mechanism for swagger models (MR 254, nathanejohnson)
- replace uses of wildcard in generated apidocs (issue 251)

2015-05-25
- (api break) changed the type of Properties in Model
- (api break) changed the type of Models in ApiDeclaration
- (api break) changed the parameter type of PostBuildDeclarationMapFunc

2015-04-09
- add ModelBuildable interface for customization of Model

2015-03-17
- preserve order of Routes per WebService in Swagger listing
- fix use of $ref and type in Swagger models
- add api version to listing

2014-11-14
- operation parameters are now sorted using ordering path,query,form,header,body

2014-11-12
- respect omitempty tag value for embedded structs
- expose ApiVersion of WebService to Swagger ApiDeclaration

2014-05-29
- (api add) Ability to define custom http.Handler to serve swagger-ui static files

2014-05-04
- (fix) include model for array element type of response

2014-01-03
- (fix) do not add primitive type to the Api models

2013-11-27
- (fix) make Swagger work for WebServices with root ("/" or "") paths

2013-10-29
- (api add) package variable LogInfo to customize logging function

2013-10-15
- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)
@@ -0,0 +1,22 @@
Copyright (c) 2017 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,83 @@
# go-restful-swagger12

[Build Status](https://travis-ci.org/emicklei/go-restful-swagger12)
[GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12)

How to use Swagger UI with go-restful
=

Get the Swagger UI sources (version 1.2 only)

	git clone https://github.com/wordnik/swagger-ui.git

The project contains a "dist" folder.
Its contents have all the Swagger UI files you need.

The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`.

Now, you can install the Swagger WebService for serving the Swagger specification in JSON.

	config := swagger.Config{
		WebServices:     restful.RegisteredWebServices(),
		ApiPath:         "/apidocs.json",
		SwaggerPath:     "/apidocs/",
		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
	swagger.InstallSwaggerService(config)

Documenting Structs
--

Currently there are 2 ways to document your structs in the go-restful Swagger.

###### By using struct tags
- Use tag "description" to annotate a struct field with a description to show in the UI
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added to any field of the struct; if there are multiple definitions, they are appended, separated by an empty line (see the sketch below).
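To make these two tags concrete, here is a small illustrative sketch; the `Ticket` type and its fields are made up for this example and are not part of the package:

	type Ticket struct {
		// "description" documents this single field in the UI.
		Code string `json:"code" description:"unique ticket code"`

		// "modelDescription" documents the Ticket model itself; multiple
		// occurrences are appended, separated by an empty line.
		Owner string `json:"owner" description:"current owner" modelDescription:"A Ticket tracks one support request"`
	}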

###### By using the SwaggerDoc method
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.

	type Address struct {
		Country  string `json:"country,omitempty"`
		PostCode int    `json:"postcode,omitempty"`
	}

	func (Address) SwaggerDoc() map[string]string {
		return map[string]string{
			"":         "Address doc",
			"country":  "Country doc",
			"postcode": "PostCode doc",
		}
	}

This example will generate a JSON like this

	{
		"Address": {
			"id": "Address",
			"description": "Address doc",
			"properties": {
				"country": {
					"type": "string",
					"description": "Country doc"
				},
				"postcode": {
					"type": "integer",
					"format": "int32",
					"description": "PostCode doc"
				}
			}
		}
	}

**Very Important Notes:**
- `SwaggerDoc()` uses a **non-pointer** receiver (e.g. `func (Address)`, not `func (*Address)`)
- The returned map should use the JSON field name as its key (e.g. `"postcode"`, not `"PostCode"`)

Notes
--
- The Nickname of an Operation is automatically set from the name of the handler function. You can override it using RouteBuilder.Operation(..); see the sketch below.
- The WebServices field of swagger.Config can be used to control which services you want to expose and document; you can have multiple configs and therefore multiple endpoints.

© 2017, ernestmicklei.com. MIT License. Contributions welcome.
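As a brief, hypothetical illustration of the first note: the service path, handler name, and operation name below are invented; only `RouteBuilder.Operation` and `restful.Add` from go-restful are relied on.

	func registerTicketService() {
		ws := new(restful.WebService)
		ws.Path("/tickets")
		// Without Operation(...), the nickname would default to the handler's
		// function name; Operation overrides it in the Swagger listing.
		ws.Route(ws.GET("/").To(listTickets).Operation("ListAllTickets"))
		restful.Add(ws)
	}

	func listTickets(req *restful.Request, resp *restful.Response) { /* ... */ }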
vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go (new file, 64 lines, generated, vendored)
@@ -0,0 +1,64 @@
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"encoding/json"
)

// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
	List []ApiDeclaration
}

// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
	for _, each := range l.List {
		if each.ResourcePath == path {
			return each, true
		}
	}
	return a, false
}

// Put adds or replaces an ApiDeclaration with this path
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
	// maybe replace existing
	for i, each := range l.List {
		if each.ResourcePath == path {
			// replace
			l.List[i] = a
			return
		}
	}
	// add
	l.List = append(l.List, a)
}

// Do enumerates all the declarations, each with its resource path
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
	for _, each := range l.List {
		block(each.ResourcePath, each)
	}
}

// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.ResourcePath)
		buf.WriteString("\": ")
		encoder.Encode(each)
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
@@ -0,0 +1,46 @@
package swagger

import (
	"net/http"
	"reflect"

	"github.com/emicklei/go-restful"
)

// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)

// MapSchemaFormatFunc can be used to modify typeName at definition time.
type MapSchemaFormatFunc func(typeName string) string

// MapModelTypeNameFunc can be used to return the desired typeName for a given
// type. It will return false if the default name should be used.
type MapModelTypeNameFunc func(t reflect.Type) (string, bool)

type Config struct {
	// url where the services are available, e.g. http://localhost:8080
	// if left empty then the basePath of Swagger is taken from the actual request
	WebServicesUrl string
	// path where the JSON api is available, e.g. /apidocs
	ApiPath string
	// [optional] path where the swagger UI will be served, e.g. /swagger
	SwaggerPath string
	// [optional] location of folder containing Swagger HTML5 application index.html
	SwaggerFilePath string
	// api listing is constructed from this list of restful WebServices.
	WebServices []*restful.WebService
	// will serve all static content (scripts, pages, images)
	StaticHandler http.Handler
	// [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
	DisableCORS bool
	// Top-level API version. Is reflected in the resource listing.
	ApiVersion string
	// If set then call this handler after building the complete ApiDeclaration Map
	PostBuildHandler PostBuildDeclarationMapFunc
	// Swagger global info struct
	Info Info
	// [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
	SchemaFormatHandler MapSchemaFormatFunc
	// [optional] If set, model builder should call this handler to retrieve the name for a given type.
	ModelTypeNameHandler MapModelTypeNameFunc
}
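A hypothetical sketch of filling this Config with a PostBuildHandler. It relies only on the fields above plus InstallSwaggerService (shown in the package README) and ApiDeclarationList.Do; the log message and listen address are invented:

	package main

	import (
		"log"
		"net/http"

		"github.com/emicklei/go-restful"
		swagger "github.com/emicklei/go-restful-swagger12"
	)

	func main() {
		cfg := swagger.Config{
			WebServices: restful.RegisteredWebServices(),
			ApiPath:     "/apidocs.json",
			// Called once after the complete declaration map has been built.
			PostBuildHandler: func(decls *swagger.ApiDeclarationList) {
				decls.Do(func(path string, decl swagger.ApiDeclaration) {
					log.Printf("swagger declaration for %s (%d apis)", path, len(decl.Apis))
				})
			},
		}
		swagger.InstallSwaggerService(cfg)
		log.Fatal(http.ListenAndServe(":8080", nil))
	}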
@ -0,0 +1,467 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModelBuildable is used for extending Structs that need more control over
|
||||||
|
// how the Model appears in the Swagger api declaration.
|
||||||
|
type ModelBuildable interface {
|
||||||
|
PostBuildModel(m *Model) *Model
|
||||||
|
}
|
||||||
|
|
||||||
|
type modelBuilder struct {
|
||||||
|
Models *ModelList
|
||||||
|
Config *Config
|
||||||
|
}
|
||||||
|
|
||||||
|
type documentable interface {
|
||||||
|
SwaggerDoc() map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
|
||||||
|
// If it exists, retrieve the documentation and overwrite all struct tag descriptions
|
||||||
|
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
|
||||||
|
if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
|
||||||
|
return docable.SwaggerDoc()
|
||||||
|
}
|
||||||
|
return make(map[string]string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addModelFrom creates and adds a Model to the builder and detects and calls
|
||||||
|
// the post build hook for customizations
|
||||||
|
func (b modelBuilder) addModelFrom(sample interface{}) {
|
||||||
|
if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
|
||||||
|
// allow customizations
|
||||||
|
if buildable, ok := sample.(ModelBuildable); ok {
|
||||||
|
modelOrNil = buildable.PostBuildModel(modelOrNil)
|
||||||
|
b.Models.Put(modelOrNil.Id, *modelOrNil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
|
||||||
|
// Turn pointers into simpler types so further checks are
|
||||||
|
// correct.
|
||||||
|
if st.Kind() == reflect.Ptr {
|
||||||
|
st = st.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
modelName := b.keyFrom(st)
|
||||||
|
if nameOverride != "" {
|
||||||
|
modelName = nameOverride
|
||||||
|
}
|
||||||
|
// no models needed for primitive types
|
||||||
|
if b.isPrimitiveType(modelName) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// golang encoding/json packages says array and slice values encode as
|
||||||
|
// JSON arrays, except that []byte encodes as a base64-encoded string.
|
||||||
|
// If we see a []byte here, treat it at as a primitive type (string)
|
||||||
|
// and deal with it in buildArrayTypeProperty.
|
||||||
|
if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
|
||||||
|
st.Elem().Kind() == reflect.Uint8 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// see if we already have visited this model
|
||||||
|
if _, ok := b.Models.At(modelName); ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
sm := Model{
|
||||||
|
Id: modelName,
|
||||||
|
Required: []string{},
|
||||||
|
Properties: ModelPropertyList{}}
|
||||||
|
|
||||||
|
// reference the model before further initializing (enables recursive structs)
|
||||||
|
b.Models.Put(modelName, sm)
|
||||||
|
|
||||||
|
// check for slice or array
|
||||||
|
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
|
||||||
|
b.addModel(st.Elem(), "")
|
||||||
|
return &sm
|
||||||
|
}
|
||||||
|
// check for structure or primitive type
|
||||||
|
if st.Kind() != reflect.Struct {
|
||||||
|
return &sm
|
||||||
|
}
|
||||||
|
|
||||||
|
fullDoc := getDocFromMethodSwaggerDoc2(st)
|
||||||
|
modelDescriptions := []string{}
|
||||||
|
|
||||||
|
for i := 0; i < st.NumField(); i++ {
|
||||||
|
field := st.Field(i)
|
||||||
|
jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
|
||||||
|
if len(modelDescription) > 0 {
|
||||||
|
modelDescriptions = append(modelDescriptions, modelDescription)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add if not omitted
|
||||||
|
if len(jsonName) != 0 {
|
||||||
|
// update description
|
||||||
|
if fieldDoc, ok := fullDoc[jsonName]; ok {
|
||||||
|
prop.Description = fieldDoc
|
||||||
|
}
|
||||||
|
// update Required
|
||||||
|
if b.isPropertyRequired(field) {
|
||||||
|
sm.Required = append(sm.Required, jsonName)
|
||||||
|
}
|
||||||
|
sm.Properties.Put(jsonName, prop)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We always overwrite documentation if SwaggerDoc method exists
|
||||||
|
// "" is special for documenting the struct itself
|
||||||
|
if modelDoc, ok := fullDoc[""]; ok {
|
||||||
|
sm.Description = modelDoc
|
||||||
|
} else if len(modelDescriptions) != 0 {
|
||||||
|
sm.Description = strings.Join(modelDescriptions, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// update model builder with completed model
|
||||||
|
b.Models.Put(modelName, sm)
|
||||||
|
|
||||||
|
return &sm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
|
||||||
|
required := true
|
||||||
|
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||||
|
s := strings.Split(jsonTag, ",")
|
||||||
|
if len(s) > 1 && s[1] == "omitempty" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return required
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
|
||||||
|
jsonName = b.jsonNameOfField(field)
|
||||||
|
if len(jsonName) == 0 {
|
||||||
|
// empty name signals skip property
|
||||||
|
return "", "", prop
|
||||||
|
}
|
||||||
|
|
||||||
|
if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
|
||||||
|
// property is metadata for the xml.Name attribute, can be skipped
|
||||||
|
return "", "", prop
|
||||||
|
}
|
||||||
|
|
||||||
|
if tag := field.Tag.Get("modelDescription"); tag != "" {
|
||||||
|
modelDescription = tag
|
||||||
|
}
|
||||||
|
|
||||||
|
prop.setPropertyMetadata(field)
|
||||||
|
if prop.Type != nil {
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
fieldType := field.Type
|
||||||
|
|
||||||
|
// check if type is doing its own marshalling
|
||||||
|
marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
|
||||||
|
if fieldType.Implements(marshalerType) {
|
||||||
|
var pType = "string"
|
||||||
|
if prop.Type == nil {
|
||||||
|
prop.Type = &pType
|
||||||
|
}
|
||||||
|
if prop.Format == "" {
|
||||||
|
prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType))
|
||||||
|
}
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if annotation says it is a string
|
||||||
|
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||||
|
s := strings.Split(jsonTag, ",")
|
||||||
|
if len(s) > 1 && s[1] == "string" {
|
||||||
|
stringt := "string"
|
||||||
|
prop.Type = &stringt
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldKind := fieldType.Kind()
|
||||||
|
switch {
|
||||||
|
case fieldKind == reflect.Struct:
|
||||||
|
jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
case fieldKind == reflect.Slice || fieldKind == reflect.Array:
|
||||||
|
jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
case fieldKind == reflect.Ptr:
|
||||||
|
jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
case fieldKind == reflect.String:
|
||||||
|
stringt := "string"
|
||||||
|
prop.Type = &stringt
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
case fieldKind == reflect.Map:
|
||||||
|
// if it's a map, it's unstructured, and swagger 1.2 can't handle it
|
||||||
|
objectType := "object"
|
||||||
|
prop.Type = &objectType
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldTypeName := b.keyFrom(fieldType)
|
||||||
|
if b.isPrimitiveType(fieldTypeName) {
|
||||||
|
mapped := b.jsonSchemaType(fieldTypeName)
|
||||||
|
prop.Type = &mapped
|
||||||
|
prop.Format = b.jsonSchemaFormat(fieldTypeName)
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
modelType := b.keyFrom(fieldType)
|
||||||
|
prop.Ref = &modelType
|
||||||
|
|
||||||
|
if fieldType.Name() == "" { // override type of anonymous structs
|
||||||
|
nestedTypeName := modelName + "." + jsonName
|
||||||
|
prop.Ref = &nestedTypeName
|
||||||
|
b.addModel(fieldType, nestedTypeName)
|
||||||
|
}
|
||||||
|
return jsonName, modelDescription, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasNamedJSONTag(field reflect.StructField) bool {
|
||||||
|
parts := strings.Split(field.Tag.Get("json"), ",")
|
||||||
|
if len(parts) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, s := range parts[1:] {
|
||||||
|
if s == "inline" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(parts[0]) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
|
||||||
|
prop.setPropertyMetadata(field)
|
||||||
|
// Check for type override in tag
|
||||||
|
if prop.Type != nil {
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
fieldType := field.Type
|
||||||
|
// check for anonymous
|
||||||
|
if len(fieldType.Name()) == 0 {
|
||||||
|
// anonymous
|
||||||
|
anonType := model.Id + "." + jsonName
|
||||||
|
b.addModel(fieldType, anonType)
|
||||||
|
prop.Ref = &anonType
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
|
||||||
|
// embedded struct
|
||||||
|
sub := modelBuilder{new(ModelList), b.Config}
|
||||||
|
sub.addModel(fieldType, "")
|
||||||
|
subKey := sub.keyFrom(fieldType)
|
||||||
|
// merge properties from sub
|
||||||
|
subModel, _ := sub.Models.At(subKey)
|
||||||
|
subModel.Properties.Do(func(k string, v ModelProperty) {
|
||||||
|
model.Properties.Put(k, v)
|
||||||
|
// if subModel says this property is required then include it
|
||||||
|
required := false
|
||||||
|
for _, each := range subModel.Required {
|
||||||
|
if k == each {
|
||||||
|
required = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if required {
|
||||||
|
model.Required = append(model.Required, k)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
// add all new referenced models
|
||||||
|
sub.Models.Do(func(key string, sub Model) {
|
||||||
|
if key != subKey {
|
||||||
|
if _, ok := b.Models.At(key); !ok {
|
||||||
|
b.Models.Put(key, sub)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
// empty name signals skip property
|
||||||
|
return "", prop
|
||||||
|
}
|
||||||
|
// simple struct
|
||||||
|
b.addModel(fieldType, "")
|
||||||
|
var pType = b.keyFrom(fieldType)
|
||||||
|
prop.Ref = &pType
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
|
||||||
|
// check for type override in tags
|
||||||
|
prop.setPropertyMetadata(field)
|
||||||
|
if prop.Type != nil {
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
fieldType := field.Type
|
||||||
|
if fieldType.Elem().Kind() == reflect.Uint8 {
|
||||||
|
stringt := "string"
|
||||||
|
prop.Type = &stringt
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
var pType = "array"
|
||||||
|
prop.Type = &pType
|
||||||
|
isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
|
||||||
|
elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
|
||||||
|
prop.Items = new(Item)
|
||||||
|
if isPrimitive {
|
||||||
|
mapped := b.jsonSchemaType(elemTypeName)
|
||||||
|
prop.Items.Type = &mapped
|
||||||
|
} else {
|
||||||
|
prop.Items.Ref = &elemTypeName
|
||||||
|
}
|
||||||
|
// add|overwrite model for element type
|
||||||
|
if fieldType.Elem().Kind() == reflect.Ptr {
|
||||||
|
fieldType = fieldType.Elem()
|
||||||
|
}
|
||||||
|
if !isPrimitive {
|
||||||
|
b.addModel(fieldType.Elem(), elemTypeName)
|
||||||
|
}
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
|
||||||
|
prop.setPropertyMetadata(field)
|
||||||
|
// Check for type override in tags
|
||||||
|
if prop.Type != nil {
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
fieldType := field.Type
|
||||||
|
|
||||||
|
// override type of pointer to list-likes
|
||||||
|
if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
|
||||||
|
var pType = "array"
|
||||||
|
prop.Type = &pType
|
||||||
|
isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
|
||||||
|
elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
|
||||||
|
if isPrimitive {
|
||||||
|
primName := b.jsonSchemaType(elemName)
|
||||||
|
prop.Items = &Item{Ref: &primName}
|
||||||
|
} else {
|
||||||
|
prop.Items = &Item{Ref: &elemName}
|
||||||
|
}
|
||||||
|
if !isPrimitive {
|
||||||
|
// add|overwrite model for element type
|
||||||
|
b.addModel(fieldType.Elem().Elem(), elemName)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// non-array, pointer type
|
||||||
|
fieldTypeName := b.keyFrom(fieldType.Elem())
|
||||||
|
var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path
|
||||||
|
if b.isPrimitiveType(fieldTypeName) {
|
||||||
|
prop.Type = &pType
|
||||||
|
prop.Format = b.jsonSchemaFormat(fieldTypeName)
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
prop.Ref = &pType
|
||||||
|
elemName := ""
|
||||||
|
if fieldType.Elem().Name() == "" {
|
||||||
|
elemName = modelName + "." + jsonName
|
||||||
|
prop.Ref = &elemName
|
||||||
|
}
|
||||||
|
b.addModel(fieldType.Elem(), elemName)
|
||||||
|
}
|
||||||
|
return jsonName, prop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
if t.Name() == "" {
|
||||||
|
return modelName + "." + jsonName
|
||||||
|
}
|
||||||
|
return b.keyFrom(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) keyFrom(st reflect.Type) string {
|
||||||
|
key := st.String()
|
||||||
|
if b.Config != nil && b.Config.ModelTypeNameHandler != nil {
|
||||||
|
if name, ok := b.Config.ModelTypeNameHandler(st); ok {
|
||||||
|
key = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(st.Name()) == 0 { // unnamed type
|
||||||
|
// Swagger UI has special meaning for [
|
||||||
|
key = strings.Replace(key, "[]", "||", -1)
|
||||||
|
}
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
// see also https://golang.org/ref/spec#Numeric_types
|
||||||
|
func (b modelBuilder) isPrimitiveType(modelName string) bool {
|
||||||
|
if len(modelName) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// jsonNameOfField returns the name of the field as it should appear in JSON format
|
||||||
|
// An empty string indicates that this field is not part of the JSON representation
|
||||||
|
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
|
||||||
|
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||||
|
s := strings.Split(jsonTag, ",")
|
||||||
|
if s[0] == "-" {
|
||||||
|
// empty name signals skip property
|
||||||
|
return ""
|
||||||
|
} else if s[0] != "" {
|
||||||
|
return s[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return field.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
|
||||||
|
func (b modelBuilder) jsonSchemaType(modelName string) string {
|
||||||
|
schemaMap := map[string]string{
|
||||||
|
"uint": "integer",
|
||||||
|
"uint8": "integer",
|
||||||
|
"uint16": "integer",
|
||||||
|
"uint32": "integer",
|
||||||
|
"uint64": "integer",
|
||||||
|
|
||||||
|
"int": "integer",
|
||||||
|
"int8": "integer",
|
||||||
|
"int16": "integer",
|
||||||
|
"int32": "integer",
|
||||||
|
"int64": "integer",
|
||||||
|
|
||||||
|
"byte": "integer",
|
||||||
|
"float64": "number",
|
||||||
|
"float32": "number",
|
||||||
|
"bool": "boolean",
|
||||||
|
"time.Time": "string",
|
||||||
|
}
|
||||||
|
mapped, ok := schemaMap[modelName]
|
||||||
|
if !ok {
|
||||||
|
return modelName // use as is (custom or struct)
|
||||||
|
}
|
||||||
|
return mapped
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
|
||||||
|
if b.Config != nil && b.Config.SchemaFormatHandler != nil {
|
||||||
|
if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
|
||||||
|
return mapped
|
||||||
|
}
|
||||||
|
}
|
||||||
|
schemaMap := map[string]string{
|
||||||
|
"int": "int32",
|
||||||
|
"int32": "int32",
|
||||||
|
"int64": "int64",
|
||||||
|
"byte": "byte",
|
||||||
|
"uint": "integer",
|
||||||
|
"uint8": "byte",
|
||||||
|
"float64": "double",
|
||||||
|
"float32": "float",
|
||||||
|
"time.Time": "date-time",
|
||||||
|
"*time.Time": "date-time",
|
||||||
|
}
|
||||||
|
mapped, ok := schemaMap[modelName]
|
||||||
|
if !ok {
|
||||||
|
return "" // no format
|
||||||
|
}
|
||||||
|
return mapped
|
||||||
|
}
|
||||||
vendor/github.com/emicklei/go-restful-swagger12/model_builder_test.go (new file, 1283 lines, generated, vendored)
File diff suppressed because it is too large.
@@ -0,0 +1,86 @@
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"encoding/json"
)

// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
	Name  string
	Model Model
}

// ModelList encapsulates a list of NamedModel (association)
type ModelList struct {
	List []NamedModel
}

// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
	for i, each := range l.List {
		if each.Name == name {
			// replace
			l.List[i] = NamedModel{name, model}
			return
		}
	}
	// add
	l.List = append(l.List, NamedModel{name, model})
}

// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
	for _, each := range l.List {
		if each.Name == name {
			return each.Model, true
		}
	}
	return m, false
}

// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
	for _, each := range l.List {
		block(each.Name, each.Model)
	}
}

// MarshalJSON writes the ModelList as if it was a map[string]Model
func (l ModelList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.Name)
		buf.WriteString("\": ")
		encoder.Encode(each.Model)
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}

// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
	raw := map[string]interface{}{}
	json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
	for k, v := range raw {
		// produces JSON bytes for each value
		data, err := json.Marshal(v)
		if err != nil {
			return err
		}
		var m Model
		json.NewDecoder(bytes.NewReader(data)).Decode(&m)
		l.Put(k, m)
	}
	return nil
}
vendor/github.com/emicklei/go-restful-swagger12/model_list_test.go (new file, 48 lines, generated, vendored)
@@ -0,0 +1,48 @@
package swagger

import (
	"encoding/json"
	"testing"
)

func TestModelList(t *testing.T) {
	m := Model{}
	m.Id = "m"
	l := ModelList{}
	l.Put("m", m)
	k, ok := l.At("m")
	if !ok {
		t.Error("want model back")
	}
	if got, want := k.Id, "m"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}

func TestModelList_Marshal(t *testing.T) {
	l := ModelList{}
	m := Model{Id: "myid"}
	l.Put("myid", m)
	data, err := json.Marshal(l)
	if err != nil {
		t.Error(err)
	}
	if got, want := string(data), `{"myid":{"id":"myid","properties":{}}}`; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}

func TestModelList_Unmarshal(t *testing.T) {
	data := `{"myid":{"id":"myid","properties":{}}}`
	l := ModelList{}
	if err := json.Unmarshal([]byte(data), &l); err != nil {
		t.Error(err)
	}
	m, ok := l.At("myid")
	if !ok {
		t.Error("expected myid")
	}
	if got, want := m.Id, "myid"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go (new file, 81 lines, generated, vendored)
@@ -0,0 +1,81 @@
package swagger

import (
	"reflect"
	"strings"
)

func (prop *ModelProperty) setDescription(field reflect.StructField) {
	if tag := field.Tag.Get("description"); tag != "" {
		prop.Description = tag
	}
}

func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
	if tag := field.Tag.Get("default"); tag != "" {
		prop.DefaultValue = Special(tag)
	}
}

func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
	// We use | to separate the enum values. This value is chosen
	// since it's unlikely to be useful in actual enumeration values.
	if tag := field.Tag.Get("enum"); tag != "" {
		prop.Enum = strings.Split(tag, "|")
	}
}

func (prop *ModelProperty) setMaximum(field reflect.StructField) {
	if tag := field.Tag.Get("maximum"); tag != "" {
		prop.Maximum = tag
	}
}

func (prop *ModelProperty) setType(field reflect.StructField) {
	if tag := field.Tag.Get("type"); tag != "" {
		// Check if the first two characters of the type tag are
		// intended to emulate slice/array behaviour.
		//
		// If type is intended to be a slice/array then add the
		// overridden type to the array item instead of the main property
		if len(tag) > 2 && tag[0:2] == "[]" {
			pType := "array"
			prop.Type = &pType
			prop.Items = new(Item)

			iType := tag[2:]
			prop.Items.Type = &iType
			return
		}

		prop.Type = &tag
	}
}

func (prop *ModelProperty) setMinimum(field reflect.StructField) {
	if tag := field.Tag.Get("minimum"); tag != "" {
		prop.Minimum = tag
	}
}

func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
	tag := field.Tag.Get("unique")
	switch tag {
	case "true":
		v := true
		prop.UniqueItems = &v
	case "false":
		v := false
		prop.UniqueItems = &v
	}
}

func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
	prop.setDescription(field)
	prop.setEnumValues(field)
	prop.setMinimum(field)
	prop.setMaximum(field)
	prop.setUniqueItems(field)
	prop.setDefaultValue(field)
	prop.setType(field)
}
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext_test.go (new file, 70 lines, generated, vendored)
@@ -0,0 +1,70 @@
package swagger

import (
	"net"
	"testing"
)

// clear && go test -v -test.run TestThatExtraTagsAreReadIntoModel ...swagger
func TestThatExtraTagsAreReadIntoModel(t *testing.T) {
	type fakeint int
	type fakearray string
	type Anything struct {
		Name      string    `description:"name" modelDescription:"a test"`
		Size      int       `minimum:"0" maximum:"10"`
		Stati     string    `enum:"off|on" default:"on" modelDescription:"more description"`
		ID        string    `unique:"true"`
		FakeInt   fakeint   `type:"integer"`
		FakeArray fakearray `type:"[]string"`
		IP        net.IP    `type:"string"`
		Password  string
	}
	m := modelsFromStruct(Anything{})
	props, _ := m.At("swagger.Anything")
	p1, _ := props.Properties.At("Name")
	if got, want := p1.Description, "name"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p2, _ := props.Properties.At("Size")
	if got, want := p2.Minimum, "0"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	if got, want := p2.Maximum, "10"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p3, _ := props.Properties.At("Stati")
	if got, want := p3.Enum[0], "off"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	if got, want := p3.Enum[1], "on"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p4, _ := props.Properties.At("ID")
	if got, want := *p4.UniqueItems, true; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p5, _ := props.Properties.At("Password")
	if got, want := *p5.Type, "string"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p6, _ := props.Properties.At("FakeInt")
	if got, want := *p6.Type, "integer"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p7, _ := props.Properties.At("FakeArray")
	if got, want := *p7.Type, "array"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p7p, _ := props.Properties.At("FakeArray")
	if got, want := *p7p.Items.Type, "string"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
	p8, _ := props.Properties.At("IP")
	if got, want := *p8.Type, "string"; got != want {
		t.Errorf("got %v want %v", got, want)
	}

	if got, want := props.Description, "a test\nmore description"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}
vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go (new file, 87 lines, generated, vendored)
@@ -0,0 +1,87 @@
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"encoding/json"
)

// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
	Name     string
	Property ModelProperty
}

// ModelPropertyList encapsulates a list of NamedModelProperty (association)
type ModelPropertyList struct {
	List []NamedModelProperty
}

// At returns the ModelProperty by its name unless absent, then ok is false
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
	for _, each := range l.List {
		if each.Name == name {
			return each.Property, true
		}
	}
	return p, false
}

// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
	// maybe replace existing
	for i, each := range l.List {
		if each.Name == name {
			// replace
			l.List[i] = NamedModelProperty{Name: name, Property: prop}
			return
		}
	}
	// add
	l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}

// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
	for _, each := range l.List {
		block(each.Name, each.Property)
	}
}

// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.Name)
		buf.WriteString("\": ")
		encoder.Encode(each.Property)
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}

// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
	raw := map[string]interface{}{}
	json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
	for k, v := range raw {
		// produces JSON bytes for each value
		data, err := json.Marshal(v)
		if err != nil {
			return err
		}
		var m ModelProperty
		json.NewDecoder(bytes.NewReader(data)).Decode(&m)
		l.Put(k, m)
	}
	return nil
}
vendor/github.com/emicklei/go-restful-swagger12/model_property_list_test.go (new file, 47 lines, generated, vendored)
@@ -0,0 +1,47 @@
package swagger

import (
	"encoding/json"
	"testing"
)

func TestModelPropertyList(t *testing.T) {
	l := ModelPropertyList{}
	p := ModelProperty{Description: "d"}
	l.Put("p", p)
	q, ok := l.At("p")
	if !ok {
		t.Error("expected p")
	}
	if got, want := q.Description, "d"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}

func TestModelPropertyList_Marshal(t *testing.T) {
	l := ModelPropertyList{}
	p := ModelProperty{Description: "d"}
	l.Put("p", p)
	data, err := json.Marshal(l)
	if err != nil {
		t.Error(err)
	}
	if got, want := string(data), `{"p":{"description":"d"}}`; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}

func TestModelPropertyList_Unmarshal(t *testing.T) {
	data := `{"p":{"description":"d"}}`
	l := ModelPropertyList{}
	if err := json.Unmarshal([]byte(data), &l); err != nil {
		t.Error(err)
	}
	m, ok := l.At("p")
	if !ok {
		t.Error("expected p")
	}
	if got, want := m.Description, "d"; got != want {
		t.Errorf("got %v want %v", got, want)
	}
}
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go (new file, 36 lines, generated, vendored)
@@ -0,0 +1,36 @@
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "github.com/emicklei/go-restful"

type orderedRouteMap struct {
	elements map[string][]restful.Route
	keys     []string
}

func newOrderedRouteMap() *orderedRouteMap {
	return &orderedRouteMap{
		elements: map[string][]restful.Route{},
		keys:     []string{},
	}
}

func (o *orderedRouteMap) Add(key string, route restful.Route) {
	routes, ok := o.elements[key]
	if ok {
		routes = append(routes, route)
		o.elements[key] = routes
		return
	}
	o.elements[key] = []restful.Route{route}
	o.keys = append(o.keys, key)
}

func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
	for _, k := range o.keys {
		block(k, o.elements[k])
	}
}
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map_test.go (new file, 29 lines, generated, vendored)
@@ -0,0 +1,29 @@
package swagger

import (
	"testing"

	"github.com/emicklei/go-restful"
)

// go test -v -test.run TestOrderedRouteMap ...swagger
func TestOrderedRouteMap(t *testing.T) {
	m := newOrderedRouteMap()
	r1 := restful.Route{Path: "/r1"}
	r2 := restful.Route{Path: "/r2"}
	m.Add("a", r1)
	m.Add("b", r2)
	m.Add("b", r1)
	m.Add("d", r2)
	m.Add("c", r2)
	order := ""
	m.Do(func(k string, routes []restful.Route) {
		order += k
		if len(routes) == 0 {
			t.Fail()
		}
	})
	if order != "abdc" {
		t.Fail()
	}
}
vendor/github.com/emicklei/go-restful-swagger12/postbuild_model_test.go (new file, 42 lines, generated, vendored)
@@ -0,0 +1,42 @@
package swagger

import "testing"

type Boat struct {
	Length int `json:"-"` // by default, this makes the fields not required
	Weight int `json:"-"`
}

// PostBuildModel is from swagger.ModelBuildable
func (b Boat) PostBuildModel(m *Model) *Model {
	// override required
	m.Required = []string{"Length", "Weight"}

	// add a model property (just to test that it can be added; is this a real use case?)
	extraType := "string"
	m.Properties.Put("extra", ModelProperty{
		Description: "extra description",
		DataTypeFields: DataTypeFields{
			Type: &extraType,
		},
	})
	return m
}

func TestCustomPostModelBuilder(t *testing.T) {
	testJsonFromStruct(t, Boat{}, `{
  "swagger.Boat": {
   "id": "swagger.Boat",
   "required": [
    "Length",
    "Weight"
   ],
   "properties": {
    "extra": {
     "type": "string",
     "description": "extra description"
    }
   }
  }
 }`)
}
|
@ -0,0 +1,185 @@
// Package swagger implements the structures of the Swagger
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
package swagger

const swaggerVersion = "1.2"

// 4.3.3 Data Type Fields
type DataTypeFields struct {
    Type *string `json:"type,omitempty"` // if Ref not used
    Ref *string `json:"$ref,omitempty"` // if Type not used
    Format string `json:"format,omitempty"`
    DefaultValue Special `json:"defaultValue,omitempty"`
    Enum []string `json:"enum,omitempty"`
    Minimum string `json:"minimum,omitempty"`
    Maximum string `json:"maximum,omitempty"`
    Items *Item `json:"items,omitempty"`
    UniqueItems *bool `json:"uniqueItems,omitempty"`
}

type Special string

// 4.3.4 Items Object
type Item struct {
    Type *string `json:"type,omitempty"`
    Ref *string `json:"$ref,omitempty"`
    Format string `json:"format,omitempty"`
}

// 5.1 Resource Listing
type ResourceListing struct {
    SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
    Apis []Resource `json:"apis"`
    ApiVersion string `json:"apiVersion"`
    Info Info `json:"info"`
    Authorizations []Authorization `json:"authorizations,omitempty"`
}

// 5.1.2 Resource Object
type Resource struct {
    Path string `json:"path"` // relative or absolute, must start with /
    Description string `json:"description"`
}

// 5.1.3 Info Object
type Info struct {
    Title string `json:"title"`
    Description string `json:"description"`
    TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
    Contact string `json:"contact,omitempty"`
    License string `json:"license,omitempty"`
    LicenseUrl string `json:"licenseUrl,omitempty"`
}

// 5.1.5
type Authorization struct {
    Type string `json:"type"`
    PassAs string `json:"passAs"`
    Keyname string `json:"keyname"`
    Scopes []Scope `json:"scopes"`
    GrantTypes []GrantType `json:"grandTypes"`
}

// 5.1.6, 5.2.11
type Scope struct {
    // Required. The name of the scope.
    Scope string `json:"scope"`
    // Recommended. A short description of the scope.
    Description string `json:"description"`
}

// 5.1.7
type GrantType struct {
    Implicit Implicit `json:"implicit"`
    AuthorizationCode AuthorizationCode `json:"authorization_code"`
}

// 5.1.8 Implicit Object
type Implicit struct {
    // Required. The login endpoint definition.
    loginEndpoint LoginEndpoint `json:"loginEndpoint"`
    // An optional alternative name to standard "access_token" OAuth2 parameter.
    TokenName string `json:"tokenName"`
}

// 5.1.9 Authorization Code Object
type AuthorizationCode struct {
    TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
    TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
}

// 5.1.10 Login Endpoint Object
type LoginEndpoint struct {
    // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
    Url string `json:"url"`
}

// 5.1.11 Token Request Endpoint Object
type TokenRequestEndpoint struct {
    // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
    Url string `json:"url"`
    // An optional alternative name to standard "client_id" OAuth2 parameter.
    ClientIdName string `json:"clientIdName"`
    // An optional alternative name to the standard "client_secret" OAuth2 parameter.
    ClientSecretName string `json:"clientSecretName"`
}

// 5.1.12 Token Endpoint Object
type TokenEndpoint struct {
    // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
    Url string `json:"url"`
    // An optional alternative name to standard "access_token" OAuth2 parameter.
    TokenName string `json:"tokenName"`
}

// 5.2 API Declaration
type ApiDeclaration struct {
    SwaggerVersion string `json:"swaggerVersion"`
    ApiVersion string `json:"apiVersion"`
    BasePath string `json:"basePath"`
    ResourcePath string `json:"resourcePath"` // must start with /
    Info Info `json:"info"`
    Apis []Api `json:"apis,omitempty"`
    Models ModelList `json:"models,omitempty"`
    Produces []string `json:"produces,omitempty"`
    Consumes []string `json:"consumes,omitempty"`
    Authorizations []Authorization `json:"authorizations,omitempty"`
}

// 5.2.2 API Object
type Api struct {
    Path string `json:"path"` // relative or absolute, must start with /
    Description string `json:"description"`
    Operations []Operation `json:"operations,omitempty"`
}

// 5.2.3 Operation Object
type Operation struct {
    DataTypeFields
    Method string `json:"method"`
    Summary string `json:"summary,omitempty"`
    Notes string `json:"notes,omitempty"`
    Nickname string `json:"nickname"`
    Authorizations []Authorization `json:"authorizations,omitempty"`
    Parameters []Parameter `json:"parameters"`
    ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
    Produces []string `json:"produces,omitempty"`
    Consumes []string `json:"consumes,omitempty"`
    Deprecated string `json:"deprecated,omitempty"`
}

// 5.2.4 Parameter Object
type Parameter struct {
    DataTypeFields
    ParamType string `json:"paramType"` // path,query,body,header,form
    Name string `json:"name"`
    Description string `json:"description"`
    Required bool `json:"required"`
    AllowMultiple bool `json:"allowMultiple"`
}

// 5.2.5 Response Message Object
type ResponseMessage struct {
    Code int `json:"code"`
    Message string `json:"message"`
    ResponseModel string `json:"responseModel,omitempty"`
}

// 5.2.6, 5.2.7 Models Object
type Model struct {
    Id string `json:"id"`
    Description string `json:"description,omitempty"`
    Required []string `json:"required,omitempty"`
    Properties ModelPropertyList `json:"properties"`
    SubTypes []string `json:"subTypes,omitempty"`
    Discriminator string `json:"discriminator,omitempty"`
}

// 5.2.8 Properties Object
type ModelProperty struct {
    DataTypeFields
    Description string `json:"description,omitempty"`
}

// 5.2.10
type Authorizations map[string]Authorization
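As a quick orientation to these definitions (an illustrative sketch, not part of the vendored diff), the snippet below builds a minimal ResourceListing from the types above and marshals it; it assumes the package is imported under the vendored path shown in the file headers, and the resource path and titles are made-up placeholders.

package main

import (
    "encoding/json"
    "fmt"

    swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
    // A tiny Swagger 1.2 resource listing built from the structs defined above.
    listing := swagger.ResourceListing{
        SwaggerVersion: "1.2",
        ApiVersion:     "1.0",
        Apis: []swagger.Resource{
            {Path: "/users", Description: "User management"}, // hypothetical resource
        },
        Info: swagger.Info{Title: "Example API", Description: "Demo listing"},
    }
    out, err := json.MarshalIndent(listing, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // prints the swaggerVersion/apis/apiVersion/info JSON document
}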
21 vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go generated vendored Normal file
@ -0,0 +1,21 @@
package swagger

type SwaggerBuilder struct {
    SwaggerService
}

func NewSwaggerBuilder(config Config) *SwaggerBuilder {
    return &SwaggerBuilder{*newSwaggerService(config)}
}

func (sb SwaggerBuilder) ProduceListing() ResourceListing {
    return sb.SwaggerService.produceListing()
}

func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
    return sb.SwaggerService.produceAllDeclarations()
}

func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
    return sb.SwaggerService.produceDeclarations(route)
}
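For context, a minimal sketch (not part of the diff) of driving this builder from application code; the route, URL and API path are placeholder assumptions, while NewSwaggerBuilder, Config and ProduceListing come from the vendored package above.

package main

import (
    "fmt"

    restful "github.com/emicklei/go-restful"
    swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
    // Register a trivial service so the builder has something to document.
    ws := new(restful.WebService)
    ws.Path("/users")
    ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {}))

    builder := swagger.NewSwaggerBuilder(swagger.Config{
        WebServicesUrl: "http://localhost:8080", // placeholder
        ApiPath:        "/apidocs.json",         // placeholder
        WebServices:    []*restful.WebService{ws},
    })

    listing := builder.ProduceListing()
    fmt.Printf("resource listing describes %d api(s)\n", len(listing.Apis))
}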
@ -0,0 +1,318 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful"
|
||||||
|
"github.com/emicklei/go-restful-swagger12/test_package"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestInfoStruct_Issue231(t *testing.T) {
|
||||||
|
config := Config{
|
||||||
|
Info: Info{
|
||||||
|
Title: "Title",
|
||||||
|
Description: "Description",
|
||||||
|
TermsOfServiceUrl: "http://example.com",
|
||||||
|
Contact: "example@example.com",
|
||||||
|
License: "License",
|
||||||
|
LicenseUrl: "http://example.com/license.txt",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sws := newSwaggerService(config)
|
||||||
|
str, err := json.MarshalIndent(sws.produceListing(), "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
compareJson(t, string(str), `
|
||||||
|
{
|
||||||
|
"apiVersion": "",
|
||||||
|
"swaggerVersion": "1.2",
|
||||||
|
"apis": null,
|
||||||
|
"info": {
|
||||||
|
"title": "Title",
|
||||||
|
"description": "Description",
|
||||||
|
"termsOfServiceUrl": "http://example.com",
|
||||||
|
"contact": "example@example.com",
|
||||||
|
"license": "License",
|
||||||
|
"licenseUrl": "http://example.com/license.txt"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// go test -v -test.run TestThatMultiplePathsOnRootAreHandled ...swagger
|
||||||
|
func TestThatMultiplePathsOnRootAreHandled(t *testing.T) {
|
||||||
|
ws1 := new(restful.WebService)
|
||||||
|
ws1.Route(ws1.GET("/_ping").To(dummy))
|
||||||
|
ws1.Route(ws1.GET("/version").To(dummy))
|
||||||
|
|
||||||
|
cfg := Config{
|
||||||
|
WebServicesUrl: "http://here.com",
|
||||||
|
ApiPath: "/apipath",
|
||||||
|
WebServices: []*restful.WebService{ws1},
|
||||||
|
}
|
||||||
|
sws := newSwaggerService(cfg)
|
||||||
|
decl := sws.composeDeclaration(ws1, "/")
|
||||||
|
if got, want := len(decl.Apis), 2; got != want {
|
||||||
|
t.Errorf("got %v want %v", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteSamples(t *testing.T) {
|
||||||
|
ws1 := new(restful.WebService)
|
||||||
|
ws1.Route(ws1.GET("/object").To(dummy).Writes(test_package.TestStruct{}))
|
||||||
|
ws1.Route(ws1.GET("/array").To(dummy).Writes([]test_package.TestStruct{}))
|
||||||
|
ws1.Route(ws1.GET("/object_and_array").To(dummy).Writes(struct{ Abc test_package.TestStruct }{}))
|
||||||
|
|
||||||
|
cfg := Config{
|
||||||
|
WebServicesUrl: "http://here.com",
|
||||||
|
ApiPath: "/apipath",
|
||||||
|
WebServices: []*restful.WebService{ws1},
|
||||||
|
}
|
||||||
|
sws := newSwaggerService(cfg)
|
||||||
|
|
||||||
|
decl := sws.composeDeclaration(ws1, "/")
|
||||||
|
|
||||||
|
str, err := json.MarshalIndent(decl.Apis, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
compareJson(t, string(str), `
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"path": "/object",
|
||||||
|
"description": "",
|
||||||
|
"operations": [
|
||||||
|
{
|
||||||
|
"type": "test_package.TestStruct",
|
||||||
|
"method": "GET",
|
||||||
|
"nickname": "dummy",
|
||||||
|
"parameters": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "/array",
|
||||||
|
"description": "",
|
||||||
|
"operations": [
|
||||||
|
{
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"$ref": "test_package.TestStruct"
|
||||||
|
},
|
||||||
|
"method": "GET",
|
||||||
|
"nickname": "dummy",
|
||||||
|
"parameters": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "/object_and_array",
|
||||||
|
"description": "",
|
||||||
|
"operations": [
|
||||||
|
{
|
||||||
|
"type": "struct { Abc test_package.TestStruct }",
|
||||||
|
"method": "GET",
|
||||||
|
"nickname": "dummy",
|
||||||
|
"parameters": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`)
|
||||||
|
|
||||||
|
str, err = json.MarshalIndent(decl.Models, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
compareJson(t, string(str), `
|
||||||
|
{
|
||||||
|
"test_package.TestStruct": {
|
||||||
|
"id": "test_package.TestStruct",
|
||||||
|
"required": [
|
||||||
|
"TestField"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"TestField": {
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"||test_package.TestStruct": {
|
||||||
|
"id": "||test_package.TestStruct",
|
||||||
|
"properties": {}
|
||||||
|
},
|
||||||
|
"struct { Abc test_package.TestStruct }": {
|
||||||
|
"id": "struct { Abc test_package.TestStruct }",
|
||||||
|
"required": [
|
||||||
|
"Abc"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"Abc": {
|
||||||
|
"$ref": "test_package.TestStruct"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRoutesWithCommonPart(t *testing.T) {
|
||||||
|
ws1 := new(restful.WebService)
|
||||||
|
ws1.Path("/")
|
||||||
|
ws1.Route(ws1.GET("/foobar").To(dummy).Writes(test_package.TestStruct{}))
|
||||||
|
ws1.Route(ws1.HEAD("/foobar").To(dummy).Writes(test_package.TestStruct{}))
|
||||||
|
ws1.Route(ws1.GET("/foo").To(dummy).Writes([]test_package.TestStruct{}))
|
||||||
|
ws1.Route(ws1.HEAD("/foo").To(dummy).Writes(test_package.TestStruct{}))
|
||||||
|
|
||||||
|
cfg := Config{
|
||||||
|
WebServicesUrl: "http://here.com",
|
||||||
|
ApiPath: "/apipath",
|
||||||
|
WebServices: []*restful.WebService{ws1},
|
||||||
|
}
|
||||||
|
sws := newSwaggerService(cfg)
|
||||||
|
|
||||||
|
decl := sws.composeDeclaration(ws1, "/foo")
|
||||||
|
|
||||||
|
str, err := json.MarshalIndent(decl.Apis, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
compareJson(t, string(str), `[
|
||||||
|
{
|
||||||
|
"path": "/foo",
|
||||||
|
"description": "",
|
||||||
|
"operations": [
|
||||||
|
{
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"$ref": "test_package.TestStruct"
|
||||||
|
},
|
||||||
|
"method": "GET",
|
||||||
|
"nickname": "dummy",
|
||||||
|
"parameters": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "test_package.TestStruct",
|
||||||
|
"method": "HEAD",
|
||||||
|
"nickname": "dummy",
|
||||||
|
"parameters": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// go test -v -test.run TestServiceToApi ...swagger
|
||||||
|
func TestServiceToApi(t *testing.T) {
|
||||||
|
ws := new(restful.WebService)
|
||||||
|
ws.Path("/tests")
|
||||||
|
ws.Consumes(restful.MIME_JSON)
|
||||||
|
ws.Produces(restful.MIME_XML)
|
||||||
|
ws.Route(ws.GET("/a").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.PUT("/b").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.POST("/c").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.DELETE("/d").To(dummy).Writes(sample{}))
|
||||||
|
|
||||||
|
ws.Route(ws.GET("/d").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.PUT("/c").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.POST("/b").To(dummy).Writes(sample{}))
|
||||||
|
ws.Route(ws.DELETE("/a").To(dummy).Writes(sample{}))
|
||||||
|
ws.ApiVersion("1.2.3")
|
||||||
|
cfg := Config{
|
||||||
|
WebServicesUrl: "http://here.com",
|
||||||
|
ApiPath: "/apipath",
|
||||||
|
WebServices: []*restful.WebService{ws},
|
||||||
|
PostBuildHandler: func(in *ApiDeclarationList) {},
|
||||||
|
}
|
||||||
|
sws := newSwaggerService(cfg)
|
||||||
|
decl := sws.composeDeclaration(ws, "/tests")
|
||||||
|
// checks
|
||||||
|
if decl.ApiVersion != "1.2.3" {
|
||||||
|
t.Errorf("got %v want %v", decl.ApiVersion, "1.2.3")
|
||||||
|
}
|
||||||
|
if decl.BasePath != "http://here.com" {
|
||||||
|
t.Errorf("got %v want %v", decl.BasePath, "http://here.com")
|
||||||
|
}
|
||||||
|
if len(decl.Apis) != 4 {
|
||||||
|
t.Errorf("got %v want %v", len(decl.Apis), 4)
|
||||||
|
}
|
||||||
|
pathOrder := ""
|
||||||
|
for _, each := range decl.Apis {
|
||||||
|
pathOrder += each.Path
|
||||||
|
for _, other := range each.Operations {
|
||||||
|
pathOrder += other.Method
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pathOrder != "/tests/aGETDELETE/tests/bPUTPOST/tests/cPOSTPUT/tests/dDELETEGET" {
|
||||||
|
t.Errorf("got %v want %v", pathOrder, "see test source")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dummy(i *restful.Request, o *restful.Response) {}
|
||||||
|
|
||||||
|
// go test -v -test.run TestIssue78 ...swagger
|
||||||
|
type Response struct {
|
||||||
|
Code int
|
||||||
|
Users *[]User
|
||||||
|
Items *[]TestItem
|
||||||
|
}
|
||||||
|
type User struct {
|
||||||
|
Id, Name string
|
||||||
|
}
|
||||||
|
type TestItem struct {
|
||||||
|
Id, Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// clear && go test -v -test.run TestComposeResponseMessages ...swagger
|
||||||
|
func TestComposeResponseMessages(t *testing.T) {
|
||||||
|
responseErrors := map[int]restful.ResponseError{}
|
||||||
|
responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}}
|
||||||
|
route := restful.Route{ResponseErrors: responseErrors}
|
||||||
|
decl := new(ApiDeclaration)
|
||||||
|
decl.Models = ModelList{}
|
||||||
|
msgs := composeResponseMessages(route, decl, &Config{})
|
||||||
|
if msgs[0].ResponseModel != "swagger.TestItem" {
|
||||||
|
t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue78(t *testing.T) {
|
||||||
|
sws := newSwaggerService(Config{})
|
||||||
|
models := new(ModelList)
|
||||||
|
sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models)
|
||||||
|
model, ok := models.At("swagger.Response")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("missing response model")
|
||||||
|
}
|
||||||
|
if "swagger.Response" != model.Id {
|
||||||
|
t.Fatal("wrong model id:" + model.Id)
|
||||||
|
}
|
||||||
|
code, ok := model.Properties.At("Code")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("missing code")
|
||||||
|
}
|
||||||
|
if "integer" != *code.Type {
|
||||||
|
t.Fatal("wrong code type:" + *code.Type)
|
||||||
|
}
|
||||||
|
items, ok := model.Properties.At("Items")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("missing items")
|
||||||
|
}
|
||||||
|
if "array" != *items.Type {
|
||||||
|
t.Fatal("wrong items type:" + *items.Type)
|
||||||
|
}
|
||||||
|
items_items := items.Items
|
||||||
|
if items_items == nil {
|
||||||
|
t.Fatal("missing items->items")
|
||||||
|
}
|
||||||
|
ref := items_items.Ref
|
||||||
|
if ref == nil {
|
||||||
|
t.Fatal("missing $ref")
|
||||||
|
}
|
||||||
|
if *ref != "swagger.TestItem" {
|
||||||
|
t.Fatal("wrong $ref:" + *ref)
|
||||||
|
}
|
||||||
|
}
|
||||||
443 vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go generated vendored Normal file
|
|
@ -0,0 +1,443 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful"
|
||||||
|
// "github.com/emicklei/hopwatch"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SwaggerService struct {
|
||||||
|
config Config
|
||||||
|
apiDeclarationMap *ApiDeclarationList
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSwaggerService(config Config) *SwaggerService {
|
||||||
|
sws := &SwaggerService{
|
||||||
|
config: config,
|
||||||
|
apiDeclarationMap: new(ApiDeclarationList)}
|
||||||
|
|
||||||
|
// Build all ApiDeclarations
|
||||||
|
for _, each := range config.WebServices {
|
||||||
|
rootPath := each.RootPath()
|
||||||
|
// skip the api service itself
|
||||||
|
if rootPath != config.ApiPath {
|
||||||
|
if rootPath == "" || rootPath == "/" {
|
||||||
|
// use routes
|
||||||
|
for _, route := range each.Routes() {
|
||||||
|
entry := staticPathFromRoute(route)
|
||||||
|
_, exists := sws.apiDeclarationMap.At(entry)
|
||||||
|
if !exists {
|
||||||
|
sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else { // use root path
|
||||||
|
sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if specified then call the PostBuilderHandler
|
||||||
|
if config.PostBuildHandler != nil {
|
||||||
|
config.PostBuildHandler(sws.apiDeclarationMap)
|
||||||
|
}
|
||||||
|
return sws
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
|
||||||
|
var LogInfo = func(format string, v ...interface{}) {
|
||||||
|
// use the restful package-wide logger
|
||||||
|
log.Printf(format, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstallSwaggerService adds the WebService that provides the API documentation of all services
|
||||||
|
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
|
||||||
|
func InstallSwaggerService(aSwaggerConfig Config) {
|
||||||
|
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterSwaggerService adds the WebService that provides the API documentation of all services
|
||||||
|
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
|
||||||
|
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
|
||||||
|
sws := newSwaggerService(config)
|
||||||
|
ws := new(restful.WebService)
|
||||||
|
ws.Path(config.ApiPath)
|
||||||
|
ws.Produces(restful.MIME_JSON)
|
||||||
|
if config.DisableCORS {
|
||||||
|
ws.Filter(enableCORS)
|
||||||
|
}
|
||||||
|
ws.Route(ws.GET("/").To(sws.getListing))
|
||||||
|
ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
|
||||||
|
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
|
||||||
|
LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
|
||||||
|
wsContainer.Add(ws)
|
||||||
|
|
||||||
|
// Check paths for UI serving
|
||||||
|
if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
|
||||||
|
swaggerPathSlash := config.SwaggerPath
|
||||||
|
// path must end with slash /
|
||||||
|
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
|
||||||
|
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
|
||||||
|
swaggerPathSlash += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
|
||||||
|
wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
|
||||||
|
|
||||||
|
//if we define a custom static handler use it
|
||||||
|
} else if config.StaticHandler != nil && config.SwaggerPath != "" {
|
||||||
|
swaggerPathSlash := config.SwaggerPath
|
||||||
|
// path must end with slash /
|
||||||
|
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
|
||||||
|
LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
|
||||||
|
swaggerPathSlash += "/"
|
||||||
|
|
||||||
|
}
|
||||||
|
LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
|
||||||
|
wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func staticPathFromRoute(r restful.Route) string {
|
||||||
|
static := r.Path
|
||||||
|
bracket := strings.Index(static, "{")
|
||||||
|
if bracket <= 1 { // result cannot be empty
|
||||||
|
return static
|
||||||
|
}
|
||||||
|
if bracket != -1 {
|
||||||
|
static = r.Path[:bracket]
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(static, "/") {
|
||||||
|
return static[:len(static)-1]
|
||||||
|
} else {
|
||||||
|
return static
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
|
||||||
|
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
|
||||||
|
// prevent duplicate header
|
||||||
|
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
|
||||||
|
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chain.ProcessFilter(req, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
|
||||||
|
listing := sws.produceListing()
|
||||||
|
resp.WriteAsJson(listing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceListing() ResourceListing {
|
||||||
|
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
|
||||||
|
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||||
|
ref := Resource{Path: k}
|
||||||
|
if len(v.Apis) > 0 { // use description of first (could still be empty)
|
||||||
|
ref.Description = v.Apis[0].Description
|
||||||
|
}
|
||||||
|
listing.Apis = append(listing.Apis, ref)
|
||||||
|
})
|
||||||
|
return listing
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
|
||||||
|
decl, ok := sws.produceDeclarations(composeRootPath(req))
|
||||||
|
if !ok {
|
||||||
|
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// unless WebServicesUrl is given
|
||||||
|
if len(sws.config.WebServicesUrl) == 0 {
|
||||||
|
// update base path from the actual request
|
||||||
|
// TODO how to detect https? assume http for now
|
||||||
|
var host string
|
||||||
|
// X-Forwarded-Host or Host or Request.Host
|
||||||
|
hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
|
||||||
|
if !ok || len(hostvalues) == 0 {
|
||||||
|
forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
|
||||||
|
if !ok || len(forwarded) == 0 {
|
||||||
|
// fallback to Host field
|
||||||
|
host = req.Request.Host
|
||||||
|
} else {
|
||||||
|
host = forwarded[0]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
host = hostvalues[0]
|
||||||
|
}
|
||||||
|
// inspect Referer for the scheme (http vs https)
|
||||||
|
scheme := "http"
|
||||||
|
if referer := req.Request.Header["Referer"]; len(referer) > 0 {
|
||||||
|
if strings.HasPrefix(referer[0], "https") {
|
||||||
|
scheme = "https"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
|
||||||
|
}
|
||||||
|
resp.WriteAsJson(decl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
|
||||||
|
decls := map[string]ApiDeclaration{}
|
||||||
|
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||||
|
decls[k] = v
|
||||||
|
})
|
||||||
|
return decls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
|
||||||
|
decl, ok := sws.apiDeclarationMap.At(route)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
decl.BasePath = sws.config.WebServicesUrl
|
||||||
|
return &decl, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// composeDeclaration uses all routes and parameters to create a ApiDeclaration
|
||||||
|
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
|
||||||
|
decl := ApiDeclaration{
|
||||||
|
SwaggerVersion: swaggerVersion,
|
||||||
|
BasePath: sws.config.WebServicesUrl,
|
||||||
|
ResourcePath: pathPrefix,
|
||||||
|
Models: ModelList{},
|
||||||
|
ApiVersion: ws.Version()}
|
||||||
|
|
||||||
|
// collect any path parameters
|
||||||
|
rootParams := []Parameter{}
|
||||||
|
for _, param := range ws.PathParameters() {
|
||||||
|
rootParams = append(rootParams, asSwaggerParameter(param.Data()))
|
||||||
|
}
|
||||||
|
// aggregate by path
|
||||||
|
pathToRoutes := newOrderedRouteMap()
|
||||||
|
for _, other := range ws.Routes() {
|
||||||
|
if strings.HasPrefix(other.Path, pathPrefix) {
|
||||||
|
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pathToRoutes.Add(other.Path, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pathToRoutes.Do(func(path string, routes []restful.Route) {
|
||||||
|
api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
|
||||||
|
voidString := "void"
|
||||||
|
for _, route := range routes {
|
||||||
|
operation := Operation{
|
||||||
|
Method: route.Method,
|
||||||
|
Summary: route.Doc,
|
||||||
|
Notes: route.Notes,
|
||||||
|
// Type gets overwritten if there is a write sample
|
||||||
|
DataTypeFields: DataTypeFields{Type: &voidString},
|
||||||
|
Parameters: []Parameter{},
|
||||||
|
Nickname: route.Operation,
|
||||||
|
ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
|
||||||
|
|
||||||
|
operation.Consumes = route.Consumes
|
||||||
|
operation.Produces = route.Produces
|
||||||
|
|
||||||
|
// share root params if any
|
||||||
|
for _, swparam := range rootParams {
|
||||||
|
operation.Parameters = append(operation.Parameters, swparam)
|
||||||
|
}
|
||||||
|
// route specific params
|
||||||
|
for _, param := range route.ParameterDocs {
|
||||||
|
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
|
||||||
|
}
|
||||||
|
|
||||||
|
sws.addModelsFromRouteTo(&operation, route, &decl)
|
||||||
|
api.Operations = append(api.Operations, operation)
|
||||||
|
}
|
||||||
|
decl.Apis = append(decl.Apis, api)
|
||||||
|
})
|
||||||
|
return decl
|
||||||
|
}
|
||||||
|
|
||||||
|
func withoutWildcard(path string) string {
|
||||||
|
if strings.HasSuffix(path, ":*}") {
|
||||||
|
return path[0:len(path)-3] + "}"
|
||||||
|
}
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
|
||||||
|
func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
|
||||||
|
if route.ResponseErrors == nil {
|
||||||
|
return messages
|
||||||
|
}
|
||||||
|
// sort by code
|
||||||
|
codes := sort.IntSlice{}
|
||||||
|
for code := range route.ResponseErrors {
|
||||||
|
codes = append(codes, code)
|
||||||
|
}
|
||||||
|
codes.Sort()
|
||||||
|
for _, code := range codes {
|
||||||
|
each := route.ResponseErrors[code]
|
||||||
|
message := ResponseMessage{
|
||||||
|
Code: code,
|
||||||
|
Message: each.Message,
|
||||||
|
}
|
||||||
|
if each.Model != nil {
|
||||||
|
st := reflect.TypeOf(each.Model)
|
||||||
|
isCollection, st := detectCollectionType(st)
|
||||||
|
// collection cannot be in responsemodel
|
||||||
|
if !isCollection {
|
||||||
|
modelName := modelBuilder{}.keyFrom(st)
|
||||||
|
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
|
||||||
|
message.ResponseModel = modelName
|
||||||
|
}
|
||||||
|
}
|
||||||
|
messages = append(messages, message)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it.
|
||||||
|
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
|
||||||
|
if route.ReadSample != nil {
|
||||||
|
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
|
||||||
|
}
|
||||||
|
if route.WriteSample != nil {
|
||||||
|
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
|
||||||
|
isCollection := false
|
||||||
|
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
|
||||||
|
st = st.Elem()
|
||||||
|
isCollection = true
|
||||||
|
} else {
|
||||||
|
if st.Kind() == reflect.Ptr {
|
||||||
|
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
|
||||||
|
st = st.Elem().Elem()
|
||||||
|
isCollection = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return isCollection, st
|
||||||
|
}
|
||||||
|
|
||||||
|
// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
|
||||||
|
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
|
||||||
|
mb := modelBuilder{Models: models, Config: &sws.config}
|
||||||
|
if isResponse {
|
||||||
|
sampleType, items := asDataType(sample, &sws.config)
|
||||||
|
operation.Type = sampleType
|
||||||
|
operation.Items = items
|
||||||
|
}
|
||||||
|
mb.addModelFrom(sample)
|
||||||
|
}
|
||||||
|
|
||||||
|
func asSwaggerParameter(param restful.ParameterData) Parameter {
|
||||||
|
return Parameter{
|
||||||
|
DataTypeFields: DataTypeFields{
|
||||||
|
Type: ¶m.DataType,
|
||||||
|
Format: asFormat(param.DataType, param.DataFormat),
|
||||||
|
DefaultValue: Special(param.DefaultValue),
|
||||||
|
},
|
||||||
|
Name: param.Name,
|
||||||
|
Description: param.Description,
|
||||||
|
ParamType: asParamType(param.Kind),
|
||||||
|
|
||||||
|
Required: param.Required}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Between 1..7 path parameters is supported
|
||||||
|
func composeRootPath(req *restful.Request) string {
|
||||||
|
path := "/" + req.PathParameter("a")
|
||||||
|
b := req.PathParameter("b")
|
||||||
|
if b == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + b
|
||||||
|
c := req.PathParameter("c")
|
||||||
|
if c == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + c
|
||||||
|
d := req.PathParameter("d")
|
||||||
|
if d == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + d
|
||||||
|
e := req.PathParameter("e")
|
||||||
|
if e == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + e
|
||||||
|
f := req.PathParameter("f")
|
||||||
|
if f == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + f
|
||||||
|
g := req.PathParameter("g")
|
||||||
|
if g == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
return path + "/" + g
|
||||||
|
}
|
||||||
|
|
||||||
|
func asFormat(dataType string, dataFormat string) string {
|
||||||
|
if dataFormat != "" {
|
||||||
|
return dataFormat
|
||||||
|
}
|
||||||
|
return "" // TODO
|
||||||
|
}
|
||||||
|
|
||||||
|
func asParamType(kind int) string {
|
||||||
|
switch {
|
||||||
|
case kind == restful.PathParameterKind:
|
||||||
|
return "path"
|
||||||
|
case kind == restful.QueryParameterKind:
|
||||||
|
return "query"
|
||||||
|
case kind == restful.BodyParameterKind:
|
||||||
|
return "body"
|
||||||
|
case kind == restful.HeaderParameterKind:
|
||||||
|
return "header"
|
||||||
|
case kind == restful.FormParameterKind:
|
||||||
|
return "form"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func asDataType(any interface{}, config *Config) (*string, *Item) {
|
||||||
|
// If it's not a collection, return the suggested model name
|
||||||
|
st := reflect.TypeOf(any)
|
||||||
|
isCollection, st := detectCollectionType(st)
|
||||||
|
modelName := modelBuilder{}.keyFrom(st)
|
||||||
|
// if it's not a collection we are done
|
||||||
|
if !isCollection {
|
||||||
|
return &modelName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: This is not very elegant
|
||||||
|
// We create an Item object referring to the given model
|
||||||
|
models := ModelList{}
|
||||||
|
mb := modelBuilder{Models: &models, Config: config}
|
||||||
|
mb.addModelFrom(any)
|
||||||
|
|
||||||
|
elemTypeName := mb.getElementTypeName(modelName, "", st)
|
||||||
|
item := new(Item)
|
||||||
|
if mb.isPrimitiveType(elemTypeName) {
|
||||||
|
mapped := mb.jsonSchemaType(elemTypeName)
|
||||||
|
item.Type = &mapped
|
||||||
|
} else {
|
||||||
|
item.Ref = &elemTypeName
|
||||||
|
}
|
||||||
|
tmp := "array"
|
||||||
|
return &tmp, item
|
||||||
|
}
|
||||||
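For orientation, a minimal sketch (not part of the vendored diff) of wiring RegisterSwaggerService from swagger_webservice.go above into a go-restful Container and serving the swagger UI; the port, paths and swagger-ui directory are placeholder assumptions, and it assumes go-restful's Container exposes a RegisteredWebServices accessor.

package main

import (
    "log"
    "net/http"

    restful "github.com/emicklei/go-restful"
    swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
    container := restful.NewContainer()

    // A trivial service to document.
    ws := new(restful.WebService)
    ws.Path("/users")
    ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
        resp.WriteAsJson([]string{"alice", "bob"})
    }))
    container.Add(ws)

    swagger.RegisterSwaggerService(swagger.Config{
        WebServices:     container.RegisteredWebServices(), // services to document (assumed accessor)
        WebServicesUrl:  "http://localhost:8080",           // placeholder
        ApiPath:         "/apidocs.json",                   // placeholder
        SwaggerPath:     "/apidocs/",                       // placeholder; must end with /
        SwaggerFilePath: "/usr/local/lib/swagger-ui/dist",  // placeholder swagger-ui checkout
    }, container)

    log.Fatal(http.ListenAndServe(":8080", container))
}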
5 vendor/github.com/emicklei/go-restful-swagger12/test_package/struct.go generated vendored Normal file
@ -0,0 +1,5 @@
package test_package

type TestStruct struct {
    TestField string
}
@ -0,0 +1,86 @@
package swagger

import (
    "bytes"
    "encoding/json"
    "fmt"
    "reflect"
    "strings"
    "testing"
)

func testJsonFromStructWithConfig(t *testing.T, sample interface{}, expectedJson string, config *Config) bool {
    m := modelsFromStructWithConfig(sample, config)
    data, _ := json.MarshalIndent(m, " ", " ")
    return compareJson(t, string(data), expectedJson)
}

func modelsFromStructWithConfig(sample interface{}, config *Config) *ModelList {
    models := new(ModelList)
    builder := modelBuilder{Models: models, Config: config}
    builder.addModelFrom(sample)
    return models
}

func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) bool {
    return testJsonFromStructWithConfig(t, sample, expectedJson, &Config{})
}

func modelsFromStruct(sample interface{}) *ModelList {
    return modelsFromStructWithConfig(sample, &Config{})
}

func compareJson(t *testing.T, actualJsonAsString string, expectedJsonAsString string) bool {
    success := false
    var actualMap map[string]interface{}
    json.Unmarshal([]byte(actualJsonAsString), &actualMap)
    var expectedMap map[string]interface{}
    err := json.Unmarshal([]byte(expectedJsonAsString), &expectedMap)
    if err != nil {
        var actualArray []interface{}
        json.Unmarshal([]byte(actualJsonAsString), &actualArray)
        var expectedArray []interface{}
        err := json.Unmarshal([]byte(expectedJsonAsString), &expectedArray)
        success = reflect.DeepEqual(actualArray, expectedArray)
        if err != nil {
            t.Fatalf("Unparsable expected JSON: %s, actual: %v, expected: %v", err, actualJsonAsString, expectedJsonAsString)
        }
    } else {
        success = reflect.DeepEqual(actualMap, expectedMap)
    }
    if !success {
        t.Log("---- expected -----")
        t.Log(withLineNumbers(expectedJsonAsString))
        t.Log("---- actual -----")
        t.Log(withLineNumbers(actualJsonAsString))
        t.Log("---- raw -----")
        t.Log(actualJsonAsString)
        t.Error("there are differences")
        return false
    }
    return true
}

func indexOfNonMatchingLine(actual, expected string) int {
    a := strings.Split(actual, "\n")
    e := strings.Split(expected, "\n")
    size := len(a)
    if len(e) < len(a) {
        size = len(e)
    }
    for i := 0; i < size; i++ {
        if a[i] != e[i] {
            return i
        }
    }
    return -1
}

func withLineNumbers(content string) string {
    var buffer bytes.Buffer
    lines := strings.Split(content, "\n")
    for i, each := range lines {
        buffer.WriteString(fmt.Sprintf("%d:%s\n", i, each))
    }
    return buffer.String()
}
@ -0,0 +1,5 @@
root = true

[*]
indent_style = tab
indent_size = 4
@ -0,0 +1,11 @@
Before reporting an issue, please ensure you are using the latest release of fsnotify.

### Which operating system (GOOS) and version are you using?

Linux: lsb_release -a
macOS: sw_vers
Windows: systeminfo | findstr /B /C:OS

### Please describe the issue that occurred.

### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
8 vendor/github.com/fsnotify/fsnotify/.github/PULL_REQUEST_TEMPLATE.md generated vendored Normal file
@ -0,0 +1,8 @@
#### What does this pull request do?


#### Where should the reviewer start?


#### How should this be manually tested?
@ -0,0 +1,6 @@
# Setup a Global .gitignore for OS and editor generated files:
# https://help.github.com/articles/ignoring-files
# git config --global core.excludesfile ~/.gitignore_global

.vagrant
*.sublime-project
@ -0,0 +1,28 @@
sudo: false
language: go

go:
  - 1.6.3
  - tip

matrix:
  allow_failures:
    - go: tip

before_script:
  - go get -u github.com/golang/lint/golint

script:
  - go test -v --race ./...

after_script:
  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
  - test -z "$(golint ./... | tee /dev/stderr)"
  - go vet ./...

os:
  - linux
  - osx

notifications:
  email: false
@ -0,0 +1,46 @@
# Names should be added to this file as
#   Name or Organization <email address>
# The email address is not required for organizations.

# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'

# Please keep the list sorted.

Adrien Bustany <adrien@bustany.org>
Amit Krishnan <amit.krishnan@oracle.com>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Nathan Youngman <git@nathany.com>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>
铁哥 <guotie.9@gmail.com>
|
@ -0,0 +1,307 @@
|
||||||
|
# Changelog
|
||||||
|
|
||||||
|
## v1.4.2 / 2016-10-10
|
||||||
|
|
||||||
|
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||||
|
|
||||||
|
## v1.4.1 / 2016-10-04
|
||||||
|
|
||||||
|
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||||
|
|
||||||
|
## v1.4.0 / 2016-10-01
|
||||||
|
|
||||||
|
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||||
|
|
||||||
|
## v1.3.1 / 2016-06-28
|
||||||
|
|
||||||
|
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||||
|
|
||||||
|
## v1.3.0 / 2016-04-19
|
||||||
|
|
||||||
|
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||||
|
|
||||||
|
## v1.2.10 / 2016-03-02
|
||||||
|
|
||||||
|
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||||
|
|
||||||
|
## v1.2.9 / 2016-01-13
|
||||||
|
|
||||||
|
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||||
|
|
||||||
|
## v1.2.8 / 2015-12-17
|
||||||
|
|
||||||
|
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||||
|
* inotify: fix race in test
|
||||||
|
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||||
|
|
||||||
|
## v1.2.5 / 2015-10-17
|
||||||
|
|
||||||
|
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||||
|
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||||
|
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||||
|
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||||
|
|
||||||
|
## v1.2.1 / 2015-10-14
|
||||||
|
|
||||||
|
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||||
|
|
||||||
|
## v1.2.0 / 2015-02-08
|
||||||
|
|
||||||
|
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||||
|
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||||
|
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||||
|
|
||||||
|
## v1.1.1 / 2015-02-05
|
||||||
|
|
||||||
|
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||||
|
|
||||||
|
## v1.1.0 / 2014-12-12
|
||||||
|
|
||||||
|
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||||
|
* add low-level functions
|
||||||
|
* only need to store flags on directories
|
||||||
|
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||||
|
* done can be an unbuffered channel
|
||||||
|
* remove calls to os.NewSyscallError
|
||||||
|
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||||
|
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## v1.0.4 / 2014-09-07
|
||||||
|
|
||||||
|
* kqueue: add dragonfly to the build tags.
|
||||||
|
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||||
|
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||||
|
|
||||||
|
## v1.0.3 / 2014-08-19
|
||||||
|
|
||||||
|
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||||
|
|
||||||
|
## v1.0.2 / 2014-08-17
|
||||||
|
|
||||||
|
* [Fix] Missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||||
|
|
||||||
|
## v1.0.0 / 2014-08-15
|
||||||
|
|
||||||
|
* [API] Remove AddWatch on Windows, use Add.
|
||||||
|
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||||
|
* Minor updates based on feedback from golint.
|
||||||
|
|
||||||
|
## dev / 2014-07-09
|
||||||
|
|
||||||
|
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||||
|
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||||
|
|
||||||
|
## dev / 2014-07-04
|
||||||
|
|
||||||
|
* kqueue: fix incorrect mutex used in Close()
|
||||||
|
* Update example to demonstrate usage of Op.
|
||||||
|
|
||||||
|
## dev / 2014-06-28
|
||||||
|
|
||||||
|
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||||
|
* Fix for String() method on Event (thanks Alex Brainman)
|
||||||
|
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||||
|
|
||||||
|
## dev / 2014-06-21
|
||||||
|
|
||||||
|
* Events channel of type Event rather than *Event.
|
||||||
|
* [internal] use syscall constants directly for inotify and kqueue.
|
||||||
|
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||||
|
|
||||||
|
## dev / 2014-06-19
|
||||||
|
|
||||||
|
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||||
|
* [internal] remove cookie from Event struct (unused).
|
||||||
|
* [internal] Event struct has the same definition across every OS.
|
||||||
|
* [internal] remove internal watch and removeWatch methods.
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||||
|
* [API] Pluralized channel names: Events and Errors.
|
||||||
|
* [API] Renamed FileEvent struct to Event.
|
||||||
|
* [API] Op constants replace methods like IsCreate().
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## dev / 2014-05-23
|
||||||
|
|
||||||
|
* [API] Remove current implementation of WatchFlags.
|
||||||
|
* current implementation doesn't take advantage of OS for efficiency
|
||||||
|
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||||
|
* no tests for the current implementation
|
||||||
|
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||||
|
|
||||||
|
## v0.9.3 / 2014-12-31
|
||||||
|
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## v0.9.2 / 2014-08-17
|
||||||
|
|
||||||
|
* [Backport] Fix missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
|
||||||
|
## v0.9.1 / 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## v0.9.0 / 2014-01-17
|
||||||
|
|
||||||
|
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||||
|
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||||
|
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||||
|
|
||||||
|
## v0.8.12 / 2013-11-13
|
||||||
|
|
||||||
|
* [API] Remove FD_SET and friends from Linux adapter
|
||||||
|
|
||||||
|
## v0.8.11 / 2013-11-02
|
||||||
|
|
||||||
|
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||||
|
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||||
|
|
||||||
|
## v0.8.10 / 2013-10-19
|
||||||
|
|
||||||
|
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||||
|
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||||
|
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||||
|
|
||||||
|
## v0.8.9 / 2013-09-08
|
||||||
|
|
||||||
|
* [Doc] Contributing (thanks @nathany)
|
||||||
|
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||||
|
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||||
|
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||||
|
|
||||||
|
## v0.8.8 / 2013-06-17
|
||||||
|
|
||||||
|
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||||
|
|
||||||
|
## v0.8.7 / 2013-06-03
|
||||||
|
|
||||||
|
* [API] Make syscall flags internal
|
||||||
|
* [Fix] inotify: ignore event changes
|
||||||
|
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||||
|
* [Fix] tests on Windows
|
||||||
|
* lower case error messages
|
||||||
|
|
||||||
|
## v0.8.6 / 2013-05-23
|
||||||
|
|
||||||
|
* kqueue: Use EVT_ONLY flag on Darwin
|
||||||
|
* [Doc] Update README with full example
|
||||||
|
|
||||||
|
## v0.8.5 / 2013-05-09
|
||||||
|
|
||||||
|
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||||
|
|
||||||
|
## v0.8.4 / 2013-04-07
|
||||||
|
|
||||||
|
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||||
|
|
||||||
|
## v0.8.3 / 2013-03-13
|
||||||
|
|
||||||
|
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||||
|
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||||
|
|
||||||
|
## v0.8.2 / 2013-02-07

* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)

## v0.8.1 / 2013-01-09

* [Fix] Windows path separators
* [Doc] BSD License

## v0.8.0 / 2012-11-09

* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)

## v0.7.4 / 2012-10-09

* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file

## v0.7.3 / 2012-09-27

* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events

## v0.7.2 / 2012-09-01

* kqueue: events for created directories

## v0.7.1 / 2012-07-14

* [Fix] for renaming files

## v0.7.0 / 2012-07-02

* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path

## v0.6.0 / 2012-06-06

* kqueue: watch files after directory created (thanks @tmc)

## v0.5.1 / 2012-05-22

* [Fix] inotify: remove all watches before Close()

## v0.5.0 / 2012-05-03

* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)

## v0.4.0 / 2012-03-30

* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify

## v0.3.0 / 2012-02-19

* kqueue: add files when watch directory

## v0.2.0 / 2011-12-30

* update to latest Go weekly code

## v0.1.0 / 2011-10-19

* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit

[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21

@@ -0,0 +1,77 @@
# Contributing

## Issues

* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
* Please indicate the platform you are using fsnotify on.
* A code example to reproduce the problem is appreciated.

## Pull Requests

### Contributor License Agreement

fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).

Please indicate that you have signed the CLA in your pull request.

### How fsnotify is Developed

* Development is done on feature branches.
* Tests are run on BSD, Linux, OS X and Windows.
* Pull requests are reviewed and [applied to master][am] using [hub][].
* Maintainers may modify or squash commits rather than asking contributors to.
* To issue a new release, the maintainers will:
  * Update the CHANGELOG
  * Tag a version, which will become available through gopkg.in.

### How to Fork

For smooth sailing, always use the original import path. Installing with `go get` makes this easy.

1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Ensure everything works and the tests pass (see below)
4. Commit your changes (`git commit -am 'Add some feature'`)

Contribute upstream:

1. Fork fsnotify on GitHub
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
3. Push to the branch (`git push fork my-new-feature`)
4. Create a new Pull Request on GitHub

This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
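To make the "original import path" advice above concrete: even if your git remotes point at a personal fork, Go source keeps importing the canonical path, so the code builds identically upstream and in your fork. A minimal, hypothetical sketch (not a file added by this PR):

```go
// main.go: illustrative only.
package main

import (
	"log"

	// Always the canonical path, never github.com/<your-fork>/fsnotify.
	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}
	log.Println("watching the current directory via the canonical import path")
}
```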

### Testing

fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
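For instance, the platform-specific files added in this change each begin with a build constraint (`// +build linux`, `// +build solaris`, and so on), while the generic files use `// +build !plan9`. A schematic sketch of how a hypothetical extra adapter would opt into a single platform (this file is not part of the PR):

```go
// A build constraint sits above the package clause, separated by a blank line.
// This hypothetical file would be compiled only on Plan 9, the one platform
// the generic files currently exclude.

// +build plan9

package fsnotify

// Plan 9 specific watcher code would live here; the file is simply skipped
// everywhere else.
```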

Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.

To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.

* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once set up, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.

Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.

Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).

### Maintainers

Help maintaining fsnotify is welcome. To be a maintainer:

* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.

To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].

All code changes should be internal pull requests.

Releases are tagged using [Semantic Versioning](http://semver.org/).

[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,50 @@
# File system notifications for Go

[GoDoc](https://godoc.org/github.com/fsnotify/fsnotify) [Go Report Card](https://goreportcard.com/report/github.com/fsnotify/fsnotify)

fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:

```console
go get -u golang.org/x/sys/...
```

Cross platform: Windows, Linux, BSD and OS X.

|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported ([build status](https://travis-ci.org/fsnotify/fsnotify))|
|kqueue |BSD, OS X, iOS\*|Supported ([build status](https://travis-ci.org/fsnotify/fsnotify))|
|ReadDirectoryChangesW|Windows|Supported ([build status](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master))|
|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|

\* Android and iOS are untested.

Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.

## API stability

fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).

All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.

Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.

## Contributing

Please refer to [CONTRIBUTING][] before opening an issue or pull request.

## Example

See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).

[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md

## Related Projects

* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)

@@ -0,0 +1,42 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !plan9

package fsnotify_test

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func ExampleNewWatcher() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan bool)
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				log.Println("event:", event)
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified file:", event.Name)
				}
			case err := <-watcher.Errors:
				log.Println("error:", err)
			}
		}
	}()

	err = watcher.Add("/tmp/foo")
	if err != nil {
		log.Fatal(err)
	}
	<-done
}

@@ -0,0 +1,37 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build solaris

package fsnotify

import (
	"errors"
)

// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
	Events chan Event
	Errors chan error
}

// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}

// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
	return nil
}

// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
	return nil
}

// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
	return nil
}

@@ -0,0 +1,62 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !plan9

// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify

import (
	"bytes"
	"fmt"
)

// Event represents a single file system notification.
type Event struct {
	Name string // Relative path to the file or directory.
	Op   Op     // File operation that triggered the event.
}

// Op describes a set of file operations.
type Op uint32

// These are the generalized file operations that can trigger a notification.
const (
	Create Op = 1 << iota
	Write
	Remove
	Rename
	Chmod
)

func (op Op) String() string {
	// Use a buffer for efficient string concatenation
	var buffer bytes.Buffer

	if op&Create == Create {
		buffer.WriteString("|CREATE")
	}
	if op&Remove == Remove {
		buffer.WriteString("|REMOVE")
	}
	if op&Write == Write {
		buffer.WriteString("|WRITE")
	}
	if op&Rename == Rename {
		buffer.WriteString("|RENAME")
	}
	if op&Chmod == Chmod {
		buffer.WriteString("|CHMOD")
	}
	if buffer.Len() == 0 {
		return ""
	}
	return buffer.String()[1:] // Strip leading pipe
}

// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
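Because Op is a bit mask, one event can carry several of these operations at once; callers test bits rather than comparing for equality (as the README example does with `event.Op&fsnotify.Write`). A small illustrative sketch of client-side inspection; the `describe` helper is hypothetical and not part of this change:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// describe is a hypothetical helper that reports which operations are set on
// an event's Op bit mask.
func describe(ev fsnotify.Event) {
	if ev.Op&fsnotify.Create == fsnotify.Create {
		log.Println("created:", ev.Name)
	}
	if ev.Op&(fsnotify.Write|fsnotify.Chmod) != 0 {
		log.Println("written or chmod'ed:", ev.Name)
	}
	if ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename {
		log.Println("removed or renamed:", ev.Name)
	}
}

func main() {
	// Prints the write/chmod line; Op.String() renders this mask as "WRITE|CHMOD".
	describe(fsnotify.Event{Name: "/tmp/foo", Op: fsnotify.Write | fsnotify.Chmod})
}
```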

@@ -0,0 +1,40 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !plan9

package fsnotify

import "testing"

func TestEventStringWithValue(t *testing.T) {
	for opMask, expectedString := range map[Op]string{
		Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
		Rename:         `"/usr/someFile": RENAME`,
		Remove:         `"/usr/someFile": REMOVE`,
		Write | Chmod:  `"/usr/someFile": WRITE|CHMOD`,
	} {
		event := Event{Name: "/usr/someFile", Op: opMask}
		if event.String() != expectedString {
			t.Fatalf("Expected %s, got: %v", expectedString, event.String())
		}
	}
}

func TestEventOpStringWithValue(t *testing.T) {
	expectedOpString := "WRITE|CHMOD"
	event := Event{Name: "someFile", Op: Write | Chmod}
	if event.Op.String() != expectedOpString {
		t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
	}
}

func TestEventOpStringWithNoValue(t *testing.T) {
	expectedOpString := ""
	event := Event{Name: "testFile", Op: 0}
	if event.Op.String() != expectedOpString {
		t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
	}
}

@ -0,0 +1,325 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
|
||||||
|
fd int
|
||||||
|
poller *fdPoller
|
||||||
|
watches map[string]*watch // Map of inotify watches (key: path)
|
||||||
|
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||||
|
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||||
|
doneResp chan struct{} // Channel to respond to Close
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
// Create inotify fd
|
||||||
|
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||||
|
if fd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
// Create epoll
|
||||||
|
poller, err := newFdPoller(fd)
|
||||||
|
if err != nil {
|
||||||
|
unix.Close(fd)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
fd: fd,
|
||||||
|
poller: poller,
|
||||||
|
watches: make(map[string]*watch),
|
||||||
|
paths: make(map[int]string),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
doneResp: make(chan struct{}),
|
||||||
|
}
|
||||||
|
w.cv = sync.NewCond(&w.mu)
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) isClosed() bool {
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||||
|
close(w.done)
|
||||||
|
|
||||||
|
// Wake up goroutine
|
||||||
|
w.poller.wake()
|
||||||
|
|
||||||
|
// Wait for goroutine to close
|
||||||
|
<-w.doneResp
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
if w.isClosed() {
|
||||||
|
return errors.New("inotify instance already closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||||
|
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||||
|
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||||
|
|
||||||
|
var flags uint32 = agnosticEvents
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
watchEntry, found := w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if found {
|
||||||
|
watchEntry.flags |= flags
|
||||||
|
flags |= unix.IN_MASK_ADD
|
||||||
|
}
|
||||||
|
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||||
|
if wd == -1 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||||
|
w.paths[wd] = name
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
// Fetch the watch.
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
watch, ok := w.watches[name]
|
||||||
|
|
||||||
|
// Remove it from inotify.
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||||
|
}
|
||||||
|
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||||
|
// the inotify will already have been removed.
|
||||||
|
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||||
|
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||||
|
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||||
|
// by another thread and we have not received IN_IGNORE event.
|
||||||
|
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||||
|
if success == -1 {
|
||||||
|
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||||
|
// the only two possible errors are:
|
||||||
|
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||||
|
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||||
|
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||||
|
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait until ignoreLinux() deleting maps
|
||||||
|
exists := true
|
||||||
|
for exists {
|
||||||
|
w.cv.Wait()
|
||||||
|
_, exists = w.watches[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||||
|
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the inotify file descriptor, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||||
|
n int // Number of bytes read with read()
|
||||||
|
errno error // Syscall errno
|
||||||
|
ok bool // For poller.wait
|
||||||
|
)
|
||||||
|
|
||||||
|
defer close(w.doneResp)
|
||||||
|
defer close(w.Errors)
|
||||||
|
defer close(w.Events)
|
||||||
|
defer unix.Close(w.fd)
|
||||||
|
defer w.poller.close()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// See if we have been closed.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ok, errno = w.poller.wait()
|
||||||
|
if errno != nil {
|
||||||
|
select {
|
||||||
|
case w.Errors <- errno:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n, errno = unix.Read(w.fd, buf[:])
|
||||||
|
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||||
|
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||||
|
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||||
|
if errno == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// unix.Read might have been woken up by Close. If so, we're done.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < unix.SizeofInotifyEvent {
|
||||||
|
var err error
|
||||||
|
if n == 0 {
|
||||||
|
// If EOF is received. This should really never happen.
|
||||||
|
err = io.EOF
|
||||||
|
} else if n < 0 {
|
||||||
|
// If an error occurred while reading.
|
||||||
|
err = errno
|
||||||
|
} else {
|
||||||
|
// Read was too short.
|
||||||
|
err = errors.New("notify: short read in readEvents()")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
// We don't know how many events we just read into the buffer
|
||||||
|
// While the offset points to at least one whole event...
|
||||||
|
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||||
|
|
||||||
|
mask := uint32(raw.Mask)
|
||||||
|
nameLen := uint32(raw.Len)
|
||||||
|
// If the event happened to the watched directory or the watched file, the kernel
|
||||||
|
// doesn't append the filename to the event, but we would like to always fill the
|
||||||
|
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||||
|
// the "paths" map.
|
||||||
|
w.mu.Lock()
|
||||||
|
name := w.paths[int(raw.Wd)]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if nameLen > 0 {
|
||||||
|
// Point "bytes" at the first byte of the filename
|
||||||
|
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||||
|
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||||
|
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||||
|
}
|
||||||
|
|
||||||
|
event := newEvent(name, mask)
|
||||||
|
|
||||||
|
// Send the events that are not ignored on the events channel
|
||||||
|
if !event.ignoreLinux(w, raw.Wd, mask) {
|
||||||
|
select {
|
||||||
|
case w.Events <- event:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
offset += unix.SizeofInotifyEvent + nameLen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Certain types of events can be "ignored" and not sent over the Events
|
||||||
|
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||||
|
// against files that do not exist.
|
||||||
|
func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
|
||||||
|
// Ignore anything the inotify API says to ignore
|
||||||
|
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
name := w.paths[int(wd)]
|
||||||
|
delete(w.paths, int(wd))
|
||||||
|
delete(w.watches, name)
|
||||||
|
w.cv.Broadcast()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the event is not a DELETE or RENAME, the file must exist.
|
||||||
|
// Otherwise the event is ignored.
|
||||||
|
// *Note*: this was put in place because it was seen that a MODIFY
|
||||||
|
// event was sent after the DELETE. This ignores that MODIFY and
|
||||||
|
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||||
|
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||||
|
_, statErr := os.Lstat(e.Name)
|
||||||
|
return os.IsNotExist(statErr)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,187 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fdPoller struct {
|
||||||
|
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||||
|
epfd int // Epoll file descriptor
|
||||||
|
pipe [2]int // Pipe for waking up
|
||||||
|
}
|
||||||
|
|
||||||
|
func emptyPoller(fd int) *fdPoller {
|
||||||
|
poller := new(fdPoller)
|
||||||
|
poller.fd = fd
|
||||||
|
poller.epfd = -1
|
||||||
|
poller.pipe[0] = -1
|
||||||
|
poller.pipe[1] = -1
|
||||||
|
return poller
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new inotify poller.
|
||||||
|
// This creates an inotify handler, and an epoll handler.
|
||||||
|
func newFdPoller(fd int) (*fdPoller, error) {
|
||||||
|
var errno error
|
||||||
|
poller := emptyPoller(fd)
|
||||||
|
defer func() {
|
||||||
|
if errno != nil {
|
||||||
|
poller.close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
poller.fd = fd
|
||||||
|
|
||||||
|
// Create epoll fd
|
||||||
|
poller.epfd, errno = unix.EpollCreate1(0)
|
||||||
|
if poller.epfd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||||
|
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register inotify fd with epoll
|
||||||
|
event := unix.EpollEvent{
|
||||||
|
Fd: int32(poller.fd),
|
||||||
|
Events: unix.EPOLLIN,
|
||||||
|
}
|
||||||
|
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register pipe fd with epoll
|
||||||
|
event = unix.EpollEvent{
|
||||||
|
Fd: int32(poller.pipe[0]),
|
||||||
|
Events: unix.EPOLLIN,
|
||||||
|
}
|
||||||
|
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||||
|
if errno != nil {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
return poller, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait using epoll.
|
||||||
|
// Returns true if something is ready to be read,
|
||||||
|
// false if there is not.
|
||||||
|
func (poller *fdPoller) wait() (bool, error) {
|
||||||
|
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||||
|
// I don't know whether epoll_wait returns the number of events returned,
|
||||||
|
// or the total number of events ready.
|
||||||
|
// I decided to catch both by making the buffer one larger than the maximum.
|
||||||
|
events := make([]unix.EpollEvent, 7)
|
||||||
|
for {
|
||||||
|
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return false, errno
|
||||||
|
}
|
||||||
|
if n == 0 {
|
||||||
|
// If there are no events, try again.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if n > 6 {
|
||||||
|
// This should never happen. More events were returned than should be possible.
|
||||||
|
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||||
|
}
|
||||||
|
ready := events[:n]
|
||||||
|
epollhup := false
|
||||||
|
epollerr := false
|
||||||
|
epollin := false
|
||||||
|
for _, event := range ready {
|
||||||
|
if event.Fd == int32(poller.fd) {
|
||||||
|
if event.Events&unix.EPOLLHUP != 0 {
|
||||||
|
// This should not happen, but if it does, treat it as a wakeup.
|
||||||
|
epollhup = true
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLERR != 0 {
|
||||||
|
// If an error is waiting on the file descriptor, we should pretend
|
||||||
|
// something is ready to read, and let unix.Read pick up the error.
|
||||||
|
epollerr = true
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLIN != 0 {
|
||||||
|
// There is data to read.
|
||||||
|
epollin = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if event.Fd == int32(poller.pipe[0]) {
|
||||||
|
if event.Events&unix.EPOLLHUP != 0 {
|
||||||
|
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||||
|
// watcher, and we should wake up.
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLERR != 0 {
|
||||||
|
// If an error is waiting on the pipe file descriptor.
|
||||||
|
// This is an absolute mystery, and should never ever happen.
|
||||||
|
return false, errors.New("Error on the pipe descriptor.")
|
||||||
|
}
|
||||||
|
if event.Events&unix.EPOLLIN != 0 {
|
||||||
|
// This is a regular wakeup, so we have to clear the buffer.
|
||||||
|
err := poller.clearWake()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if epollhup || epollerr || epollin {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the write end of the poller.
|
||||||
|
func (poller *fdPoller) wake() error {
|
||||||
|
buf := make([]byte, 1)
|
||||||
|
n, errno := unix.Write(poller.pipe[1], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EAGAIN {
|
||||||
|
// Buffer is full, poller will wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (poller *fdPoller) clearWake() error {
|
||||||
|
// You have to be woken up a LOT in order to get to 100!
|
||||||
|
buf := make([]byte, 100)
|
||||||
|
n, errno := unix.Read(poller.pipe[0], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == unix.EAGAIN {
|
||||||
|
// Buffer is empty, someone else cleared our wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close all poller file descriptors, but not the one passed to it.
|
||||||
|
func (poller *fdPoller) close() {
|
||||||
|
if poller.pipe[1] != -1 {
|
||||||
|
unix.Close(poller.pipe[1])
|
||||||
|
}
|
||||||
|
if poller.pipe[0] != -1 {
|
||||||
|
unix.Close(poller.pipe[0])
|
||||||
|
}
|
||||||
|
if poller.epfd != -1 {
|
||||||
|
unix.Close(poller.epfd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,229 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type testFd [2]int
|
||||||
|
|
||||||
|
func makeTestFd(t *testing.T) testFd {
|
||||||
|
var tfd testFd
|
||||||
|
errno := unix.Pipe(tfd[:])
|
||||||
|
if errno != nil {
|
||||||
|
t.Fatalf("Failed to create pipe: %v", errno)
|
||||||
|
}
|
||||||
|
return tfd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tfd testFd) fd() int {
|
||||||
|
return tfd[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tfd testFd) closeWrite(t *testing.T) {
|
||||||
|
errno := unix.Close(tfd[1])
|
||||||
|
if errno != nil {
|
||||||
|
t.Fatalf("Failed to close write end of pipe: %v", errno)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tfd testFd) put(t *testing.T) {
|
||||||
|
buf := make([]byte, 10)
|
||||||
|
_, errno := unix.Write(tfd[1], buf)
|
||||||
|
if errno != nil {
|
||||||
|
t.Fatalf("Failed to write to pipe: %v", errno)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tfd testFd) get(t *testing.T) {
|
||||||
|
buf := make([]byte, 10)
|
||||||
|
_, errno := unix.Read(tfd[0], buf)
|
||||||
|
if errno != nil {
|
||||||
|
t.Fatalf("Failed to read from pipe: %v", errno)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tfd testFd) close() {
|
||||||
|
unix.Close(tfd[1])
|
||||||
|
unix.Close(tfd[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePoller(t *testing.T) (testFd, *fdPoller) {
|
||||||
|
tfd := makeTestFd(t)
|
||||||
|
poller, err := newFdPoller(tfd.fd())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create poller: %v", err)
|
||||||
|
}
|
||||||
|
return tfd, poller
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerWithBadFd(t *testing.T) {
|
||||||
|
_, err := newFdPoller(-1)
|
||||||
|
if err != unix.EBADF {
|
||||||
|
t.Fatalf("Expected EBADF, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerWithData(t *testing.T) {
|
||||||
|
tfd, poller := makePoller(t)
|
||||||
|
defer tfd.close()
|
||||||
|
defer poller.close()
|
||||||
|
|
||||||
|
tfd.put(t)
|
||||||
|
ok, err := poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected poller to return true")
|
||||||
|
}
|
||||||
|
tfd.get(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerWithWakeup(t *testing.T) {
|
||||||
|
tfd, poller := makePoller(t)
|
||||||
|
defer tfd.close()
|
||||||
|
defer poller.close()
|
||||||
|
|
||||||
|
err := poller.wake()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("wake failed: %v", err)
|
||||||
|
}
|
||||||
|
ok, err := poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("expected poller to return false")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerWithClose(t *testing.T) {
|
||||||
|
tfd, poller := makePoller(t)
|
||||||
|
defer tfd.close()
|
||||||
|
defer poller.close()
|
||||||
|
|
||||||
|
tfd.closeWrite(t)
|
||||||
|
ok, err := poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected poller to return true")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerWithWakeupAndData(t *testing.T) {
|
||||||
|
tfd, poller := makePoller(t)
|
||||||
|
defer tfd.close()
|
||||||
|
defer poller.close()
|
||||||
|
|
||||||
|
tfd.put(t)
|
||||||
|
err := poller.wake()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("wake failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// both data and wakeup
|
||||||
|
ok, err := poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected poller to return true")
|
||||||
|
}
|
||||||
|
|
||||||
|
// data is still in the buffer, wakeup is cleared
|
||||||
|
ok, err = poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected poller to return true")
|
||||||
|
}
|
||||||
|
|
||||||
|
tfd.get(t)
|
||||||
|
// data is gone, only wakeup now
|
||||||
|
err = poller.wake()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("wake failed: %v", err)
|
||||||
|
}
|
||||||
|
ok, err = poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("expected poller to return false")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPollerConcurrent(t *testing.T) {
|
||||||
|
tfd, poller := makePoller(t)
|
||||||
|
defer tfd.close()
|
||||||
|
defer poller.close()
|
||||||
|
|
||||||
|
oks := make(chan bool)
|
||||||
|
live := make(chan bool)
|
||||||
|
defer close(live)
|
||||||
|
go func() {
|
||||||
|
defer close(oks)
|
||||||
|
for {
|
||||||
|
ok, err := poller.wait()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("poller failed: %v", err)
|
||||||
|
}
|
||||||
|
oks <- ok
|
||||||
|
if !<-live {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Try a write
|
||||||
|
select {
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
case <-oks:
|
||||||
|
t.Fatalf("poller did not wait")
|
||||||
|
}
|
||||||
|
tfd.put(t)
|
||||||
|
if !<-oks {
|
||||||
|
t.Fatalf("expected true")
|
||||||
|
}
|
||||||
|
tfd.get(t)
|
||||||
|
live <- true
|
||||||
|
|
||||||
|
// Try a wakeup
|
||||||
|
select {
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
case <-oks:
|
||||||
|
t.Fatalf("poller did not wait")
|
||||||
|
}
|
||||||
|
err := poller.wake()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("wake failed: %v", err)
|
||||||
|
}
|
||||||
|
if <-oks {
|
||||||
|
t.Fatalf("expected false")
|
||||||
|
}
|
||||||
|
live <- true
|
||||||
|
|
||||||
|
// Try a close
|
||||||
|
select {
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
case <-oks:
|
||||||
|
t.Fatalf("poller did not wait")
|
||||||
|
}
|
||||||
|
tfd.closeWrite(t)
|
||||||
|
if !<-oks {
|
||||||
|
t.Fatalf("expected true")
|
||||||
|
}
|
||||||
|
tfd.get(t)
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,360 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestInotifyCloseRightAway(t *testing.T) {
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close immediately; it won't even reach the first unix.Read.
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Wait for the close to complete.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
isWatcherReallyClosed(t, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyCloseSlightlyLater(t *testing.T) {
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until readEvents has reached unix.Read, and Close.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Wait for the close to complete.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
isWatcherReallyClosed(t, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher")
|
||||||
|
}
|
||||||
|
w.Add(testDir)
|
||||||
|
|
||||||
|
// Wait until readEvents has reached unix.Read, and Close.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Wait for the close to complete.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
isWatcherReallyClosed(t, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyCloseAfterRead(t *testing.T) {
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.Add(testDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to add .")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate an event.
|
||||||
|
os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
|
||||||
|
|
||||||
|
// Wait for readEvents to read the event, then close the watcher.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Wait for the close to complete.
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
isWatcherReallyClosed(t, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWatcherReallyClosed(t *testing.T, w *Watcher) {
|
||||||
|
select {
|
||||||
|
case err, ok := <-w.Errors:
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case _, ok := <-w.Events:
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
t.Fatalf("w.Events would have blocked; readEvents is still alive!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyCloseCreate(t *testing.T) {
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher: %v", err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
err = w.Add(testDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to add testDir: %v", err)
|
||||||
|
}
|
||||||
|
h, err := os.Create(filepath.Join(testDir, "testfile"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create file in testdir: %v", err)
|
||||||
|
}
|
||||||
|
h.Close()
|
||||||
|
select {
|
||||||
|
case _ = <-w.Events:
|
||||||
|
case err := <-w.Errors:
|
||||||
|
t.Fatalf("Error from watcher: %v", err)
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
t.Fatalf("Took too long to wait for event")
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point, we've received one event, so the goroutine is ready.
|
||||||
|
// It's also blocking on unix.Read.
|
||||||
|
// Now we try to swap the file descriptor under its nose.
|
||||||
|
w.Close()
|
||||||
|
w, err = NewWatcher()
|
||||||
|
defer w.Close()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create second watcher: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
<-time.After(50 * time.Millisecond)
|
||||||
|
err = w.Add(testDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error adding testDir again: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This test verifies the watcher can keep up with file creations/deletions
|
||||||
|
// when under load.
|
||||||
|
func TestInotifyStress(t *testing.T) {
|
||||||
|
maxNumToCreate := 1000
|
||||||
|
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
testFilePrefix := filepath.Join(testDir, "testfile")
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher: %v", err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
err = w.Add(testDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to add testDir: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
doneChan := make(chan struct{})
|
||||||
|
// The buffer ensures that the file generation goroutine is never blocked.
|
||||||
|
errChan := make(chan error, 2*maxNumToCreate)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for i := 0; i < maxNumToCreate; i++ {
|
||||||
|
testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
|
||||||
|
|
||||||
|
handle, err := os.Create(testFile)
|
||||||
|
if err != nil {
|
||||||
|
errChan <- fmt.Errorf("Create failed: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err = handle.Close()
|
||||||
|
if err != nil {
|
||||||
|
errChan <- fmt.Errorf("Close failed: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we delete a newly created file too quickly, inotify will skip the
|
||||||
|
// create event and only send the delete event.
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
for i := 0; i < maxNumToCreate; i++ {
|
||||||
|
testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
|
||||||
|
err = os.Remove(testFile)
|
||||||
|
if err != nil {
|
||||||
|
errChan <- fmt.Errorf("Remove failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
close(doneChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
creates := 0
|
||||||
|
removes := 0
|
||||||
|
|
||||||
|
finished := false
|
||||||
|
after := time.After(10 * time.Second)
|
||||||
|
for !finished {
|
||||||
|
select {
|
||||||
|
case <-after:
|
||||||
|
t.Fatalf("Not done")
|
||||||
|
case <-doneChan:
|
||||||
|
finished = true
|
||||||
|
case err := <-errChan:
|
||||||
|
t.Fatalf("Got an error from file creator goroutine: %v", err)
|
||||||
|
case err := <-w.Errors:
|
||||||
|
t.Fatalf("Got an error from watcher: %v", err)
|
||||||
|
case evt := <-w.Events:
|
||||||
|
if !strings.HasPrefix(evt.Name, testFilePrefix) {
|
||||||
|
t.Fatalf("Got an event for an unknown file: %s", evt.Name)
|
||||||
|
}
|
||||||
|
if evt.Op == Create {
|
||||||
|
creates++
|
||||||
|
}
|
||||||
|
if evt.Op == Remove {
|
||||||
|
removes++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drain remaining events from channels
|
||||||
|
count := 0
|
||||||
|
for count < 10 {
|
||||||
|
select {
|
||||||
|
case err := <-errChan:
|
||||||
|
t.Fatalf("Got an error from file creator goroutine: %v", err)
|
||||||
|
case err := <-w.Errors:
|
||||||
|
t.Fatalf("Got an error from watcher: %v", err)
|
||||||
|
case evt := <-w.Events:
|
||||||
|
if !strings.HasPrefix(evt.Name, testFilePrefix) {
|
||||||
|
t.Fatalf("Got an event for an unknown file: %s", evt.Name)
|
||||||
|
}
|
||||||
|
if evt.Op == Create {
|
||||||
|
creates++
|
||||||
|
}
|
||||||
|
if evt.Op == Remove {
|
||||||
|
removes++
|
||||||
|
}
|
||||||
|
count = 0
|
||||||
|
default:
|
||||||
|
count++
|
||||||
|
// Give the watcher chances to fill the channels.
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if creates-removes > 1 || creates-removes < -1 {
|
||||||
|
t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
|
||||||
|
}
|
||||||
|
if creates < 50 {
|
||||||
|
t.Fatalf("Expected at least 50 creates, got %d", creates)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyRemoveTwice(t *testing.T) {
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
testFile := filepath.Join(testDir, "testfile")
|
||||||
|
|
||||||
|
handle, err := os.Create(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Create failed: %v", err)
|
||||||
|
}
|
||||||
|
handle.Close()
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher: %v", err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
err = w.Add(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to add testFile: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = os.Remove(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to remove testFile: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.Remove(testFile)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("no error on removing invalid file")
|
||||||
|
}
|
||||||
|
s1 := fmt.Sprintf("%s", err)
|
||||||
|
|
||||||
|
err = w.Remove(testFile)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("no error on removing invalid file")
|
||||||
|
}
|
||||||
|
s2 := fmt.Sprintf("%s", err)
|
||||||
|
|
||||||
|
if s1 != s2 {
|
||||||
|
t.Fatalf("receive different error - %s / %s", s1, s2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInotifyInnerMapLength(t *testing.T) {
|
||||||
|
testDir := tempMkdir(t)
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
testFile := filepath.Join(testDir, "testfile")
|
||||||
|
|
||||||
|
handle, err := os.Create(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Create failed: %v", err)
|
||||||
|
}
|
||||||
|
handle.Close()
|
||||||
|
|
||||||
|
w, err := NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create watcher: %v", err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
err = w.Add(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to add testFile: %v", err)
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
for err := range w.Errors {
|
||||||
|
t.Fatalf("error received: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = os.Remove(testFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to remove testFile: %v", err)
|
||||||
|
}
|
||||||
|
_ = <-w.Events // consume Remove event
|
||||||
|
<-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
if len(w.watches) != 0 {
|
||||||
|
t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
|
||||||
|
}
|
||||||
|
if len(w.paths) != 0 {
|
||||||
|
t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,147 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fsnotify

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"golang.org/x/sys/unix"
)

// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
//
// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument.
//
// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
func testExchangedataForWatcher(t *testing.T, watchDir bool) {
	// Create directory to watch
	testDir1 := tempMkdir(t)

	// For the intermediate file
	testDir2 := tempMkdir(t)

	defer os.RemoveAll(testDir1)
	defer os.RemoveAll(testDir2)

	resolvedFilename := "TestFsnotifyEvents.file"

	// TextMate does:
	//
	// 1. exchangedata (intermediate, resolved)
	// 2. unlink intermediate
	//
	// Let's try to simulate that:
	resolved := filepath.Join(testDir1, resolvedFilename)
	intermediate := filepath.Join(testDir2, resolvedFilename+"~")

	// Make sure we create the file before we start watching
	createAndSyncFile(t, resolved)

	watcher := newWatcher(t)

	// Test both variants in isolation
	if watchDir {
		addWatch(t, watcher, testDir1)
	} else {
		addWatch(t, watcher, resolved)
	}

	// Receive errors on the error channel on a separate goroutine
	go func() {
		for err := range watcher.Errors {
			t.Fatalf("error received: %s", err)
		}
	}()

	// Receive events on the event channel on a separate goroutine
	eventstream := watcher.Events
	var removeReceived counter
	var createReceived counter

	done := make(chan bool)

	go func() {
		for event := range eventstream {
			// Only count relevant events
			if event.Name == filepath.Clean(resolved) {
				if event.Op&Remove == Remove {
					removeReceived.increment()
				}
				if event.Op&Create == Create {
					createReceived.increment()
				}
			}
			t.Logf("event received: %s", event)
		}
		done <- true
	}()

	// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
	for i := 1; i <= 3; i++ {
		// The intermediate file is created in a folder outside the watcher
		createAndSyncFile(t, intermediate)

		// 1. Swap
		if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
			t.Fatalf("[%d] exchangedata failed: %s", i, err)
		}

		time.Sleep(50 * time.Millisecond)

		// 2. Delete the intermediate file
		err := os.Remove(intermediate)

		if err != nil {
			t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
		}

		time.Sleep(50 * time.Millisecond)

	}

	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
	time.Sleep(500 * time.Millisecond)

	// The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
	if removeReceived.value() < 3 {
		t.Fatal("fsnotify remove events have not been received after 500 ms")
	}

	if createReceived.value() < 3 {
		t.Fatal("fsnotify create events have not been received after 500 ms")
	}

	watcher.Close()
	t.Log("waiting for the event channel to become closed...")
	select {
	case <-done:
		t.Log("event channel closed")
	case <-time.After(2 * time.Second):
		t.Fatal("event stream was not closed after 2 seconds")
	}
}

// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
func TestExchangedataInWatchedDir(t *testing.T) {
	testExchangedataForWatcher(t, true)
}

// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
func TestExchangedataInWatchedFile(t *testing.T) {
	testExchangedataForWatcher(t, false)
}

func createAndSyncFile(t *testing.T, filepath string) {
	f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatalf("creating %s failed: %s", filepath, err)
	}
	f1.Sync()
	f1.Close()
}
File diff suppressed because it is too large
@@ -0,0 +1,503 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||||
|
|
||||||
|
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||||
|
|
||||||
|
mu sync.Mutex // Protects access to watcher data
|
||||||
|
watches map[string]int // Map of watched file descriptors (key: path).
|
||||||
|
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||||
|
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||||
|
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||||
|
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||||
|
isClosed bool // Set to true when Close() is first called
|
||||||
|
}
|
||||||
|
|
||||||
|
type pathInfo struct {
|
||||||
|
name string
|
||||||
|
isDir bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
kq, err := kqueue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &Watcher{
|
||||||
|
kq: kq,
|
||||||
|
watches: make(map[string]int),
|
||||||
|
dirFlags: make(map[string]uint32),
|
||||||
|
paths: make(map[int]pathInfo),
|
||||||
|
fileExists: make(map[string]bool),
|
||||||
|
externalWatches: make(map[string]bool),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan bool),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.isClosed {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.isClosed = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
// copy paths to remove while locked
|
||||||
|
w.mu.Lock()
|
||||||
|
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||||
|
for name := range w.watches {
|
||||||
|
pathsToRemove = append(pathsToRemove, name)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
// unlock before calling Remove, which also locks
|
||||||
|
|
||||||
|
var err error
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
if e := w.Remove(name); e != nil && err == nil {
|
||||||
|
err = e
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send "quit" message to the reader goroutine:
|
||||||
|
w.done <- true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
w.mu.Lock()
|
||||||
|
w.externalWatches[name] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
_, err := w.addWatch(name, noteAllEvents)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
w.mu.Lock()
|
||||||
|
watchfd, ok := w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
const registerRemove = unix.EV_DELETE
|
||||||
|
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
unix.Close(watchfd)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
isDir := w.paths[watchfd].isDir
|
||||||
|
delete(w.watches, name)
|
||||||
|
delete(w.paths, watchfd)
|
||||||
|
delete(w.dirFlags, name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
// Find all watched paths that are in this directory that are not external.
|
||||||
|
if isDir {
|
||||||
|
var pathsToRemove []string
|
||||||
|
w.mu.Lock()
|
||||||
|
for _, path := range w.paths {
|
||||||
|
wdir, _ := filepath.Split(path.name)
|
||||||
|
if filepath.Clean(wdir) == name {
|
||||||
|
if !w.externalWatches[path.name] {
|
||||||
|
pathsToRemove = append(pathsToRemove, path.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, name := range pathsToRemove {
|
||||||
|
// Since these are internal, not much sense in propagating error
|
||||||
|
// to the user, as that will just confuse them with an error about
|
||||||
|
// a path they did not explicitly watch themselves.
|
||||||
|
w.Remove(name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||||
|
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||||
|
|
||||||
|
// keventWaitTime to block on each read from kevent
|
||||||
|
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||||
|
|
||||||
|
// addWatch adds name to the watched file set.
|
||||||
|
// The flags are interpreted as described in kevent(2).
|
||||||
|
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||||
|
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||||
|
var isDir bool
|
||||||
|
// Make ./name and name equivalent
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.isClosed {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return "", errors.New("kevent instance already closed")
|
||||||
|
}
|
||||||
|
watchfd, alreadyWatching := w.watches[name]
|
||||||
|
// We already have a watch, but we can still override flags.
|
||||||
|
if alreadyWatching {
|
||||||
|
isDir = w.paths[watchfd].isDir
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if !alreadyWatching {
|
||||||
|
fi, err := os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't watch sockets.
|
||||||
|
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't watch named pipes.
|
||||||
|
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Follow Symlinks
|
||||||
|
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||||
|
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||||
|
// consistency, we will act like everything is fine. There will simply
|
||||||
|
// be no file events for broken symlinks.
|
||||||
|
// Hence the returns of nil on errors.
|
||||||
|
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||||
|
name, err = filepath.EvalSymlinks(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
_, alreadyWatching = w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if alreadyWatching {
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fi, err = os.Lstat(name)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
watchfd, err = unix.Open(name, openMode, 0700)
|
||||||
|
if watchfd == -1 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
isDir = fi.IsDir()
|
||||||
|
}
|
||||||
|
|
||||||
|
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||||
|
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||||
|
unix.Close(watchfd)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !alreadyWatching {
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches[name] = watchfd
|
||||||
|
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if isDir {
|
||||||
|
// Watch the directory if it has not been watched before,
|
||||||
|
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||||
|
w.mu.Lock()
|
||||||
|
|
||||||
|
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||||
|
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||||
|
// Store flags so this watch can be updated later
|
||||||
|
w.dirFlags[name] = flags
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if watchDir {
|
||||||
|
if err := w.watchDirectoryFiles(name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from kqueue and converts the received kevents into
|
||||||
|
// Event values that it sends down the Events channel.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
eventBuffer := make([]unix.Kevent_t, 10)
|
||||||
|
|
||||||
|
for {
|
||||||
|
// See if there is a message on the "done" channel
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
err := unix.Close(w.kq)
|
||||||
|
if err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get new events
|
||||||
|
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||||
|
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||||
|
if err != nil && err != unix.EINTR {
|
||||||
|
w.Errors <- err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush the events we received to the Events channel
|
||||||
|
for len(kevents) > 0 {
|
||||||
|
kevent := &kevents[0]
|
||||||
|
watchfd := int(kevent.Ident)
|
||||||
|
mask := uint32(kevent.Fflags)
|
||||||
|
w.mu.Lock()
|
||||||
|
path := w.paths[watchfd]
|
||||||
|
w.mu.Unlock()
|
||||||
|
event := newEvent(path.name, mask)
|
||||||
|
|
||||||
|
if path.isDir && !(event.Op&Remove == Remove) {
|
||||||
|
// Double check to make sure the directory exists. This can happen when
// we do a rm -fr on a recursively watched folder and we receive a
// modification event first but the folder has been deleted and we later
// receive the delete event.
|
||||||
|
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||||
|
// mark it as a delete event
|
||||||
|
event.Op |= Remove
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||||
|
w.Remove(event.Name)
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.fileExists, event.Name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||||
|
w.sendDirectoryChangeEvents(event.Name)
|
||||||
|
} else {
|
||||||
|
// Send the event on the Events channel
|
||||||
|
w.Events <- event
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Remove == Remove {
|
||||||
|
// Look for a file that may have overwritten this.
|
||||||
|
// For example, mv f1 f2 will delete f2, then create f2.
|
||||||
|
if path.isDir {
|
||||||
|
fileDir := filepath.Clean(event.Name)
|
||||||
|
w.mu.Lock()
|
||||||
|
_, found := w.watches[fileDir]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if found {
|
||||||
|
// make sure the directory exists before we watch for changes. When we
|
||||||
|
// do a recursive watch and perform rm -fr, the parent directory might
|
||||||
|
// have gone missing, ignore the missing directory and let the
|
||||||
|
// upcoming delete event remove the watch from the parent directory.
|
||||||
|
if _, err := os.Lstat(fileDir); err == nil {
|
||||||
|
w.sendDirectoryChangeEvents(fileDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
filePath := filepath.Clean(event.Name)
|
||||||
|
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||||
|
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to next event
|
||||||
|
kevents = kevents[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCreateEvent(name string) Event {
|
||||||
|
return Event{Name: name, Op: Create}
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||||
|
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendDirectoryChangeEvents searches the directory for newly created files
|
||||||
|
// and sends them over the event channel. This functionality is to have
|
||||||
|
// the BSD version of fsnotify match Linux inotify which provides a
|
||||||
|
// create event for files created in a watched directory.
|
||||||
|
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for new files
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||||
|
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||||
|
w.mu.Lock()
|
||||||
|
_, doesExist := w.fileExists[filePath]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !doesExist {
|
||||||
|
// Send create event
|
||||||
|
w.Events <- newCreateEvent(filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||||
|
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||||
|
if fileInfo.IsDir() {
|
||||||
|
// mimic Linux providing delete events for subdirectories
|
||||||
|
// but preserve the flags used if currently watching subdirectory
|
||||||
|
w.mu.Lock()
|
||||||
|
flags := w.dirFlags[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||||
|
return w.addWatch(name, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// watch file to mimic Linux inotify
|
||||||
|
return w.addWatch(name, noteAllEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||||
|
func kqueue() (kq int, err error) {
|
||||||
|
kq, err = unix.Kqueue()
|
||||||
|
if kq == -1 {
|
||||||
|
return kq, err
|
||||||
|
}
|
||||||
|
return kq, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// register events with the queue
|
||||||
|
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||||
|
changes := make([]unix.Kevent_t, len(fds))
|
||||||
|
|
||||||
|
for i, fd := range fds {
|
||||||
|
// SetKevent converts int to the platform-specific types:
|
||||||
|
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||||
|
changes[i].Fflags = fflags
|
||||||
|
}
|
||||||
|
|
||||||
|
// register the events
|
||||||
|
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||||
|
if success == -1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// read retrieves pending events, or waits until an event occurs.
|
||||||
|
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||||
|
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||||
|
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return events[0:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// durationToTimespec prepares a timeout value
|
||||||
|
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||||
|
return unix.NsecToTimespec(d.Nanoseconds())
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd openbsd netbsd dragonfly

package fsnotify

import "golang.org/x/sys/unix"

const openMode = unix.O_NONBLOCK | unix.O_RDONLY
@@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin

package fsnotify

import "golang.org/x/sys/unix"

// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY
@@ -0,0 +1,561 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
isClosed bool // Set to true when Close() is first called
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
port syscall.Handle // Handle to completion port
|
||||||
|
watches watchMap // Map of watches (key: i-number)
|
||||||
|
input chan *input // Inputs to the reader are sent on this channel
|
||||||
|
quit chan chan<- error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
port: port,
|
||||||
|
watches: make(watchMap),
|
||||||
|
input: make(chan *input, 1),
|
||||||
|
Events: make(chan Event, 50),
|
||||||
|
Errors: make(chan error),
|
||||||
|
quit: make(chan chan<- error, 1),
|
||||||
|
}
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.isClosed = true
|
||||||
|
|
||||||
|
// Send "quit" message to the reader goroutine
|
||||||
|
ch := make(chan error)
|
||||||
|
w.quit <- ch
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
if w.isClosed {
|
||||||
|
return errors.New("watcher already closed")
|
||||||
|
}
|
||||||
|
in := &input{
|
||||||
|
op: opAddWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
flags: sysFSALLEVENTS,
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
in := &input{
|
||||||
|
op: opRemoveWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Options for AddWatch
|
||||||
|
sysFSONESHOT = 0x80000000
|
||||||
|
sysFSONLYDIR = 0x1000000
|
||||||
|
|
||||||
|
// Events
|
||||||
|
sysFSACCESS = 0x1
|
||||||
|
sysFSALLEVENTS = 0xfff
|
||||||
|
sysFSATTRIB = 0x4
|
||||||
|
sysFSCLOSE = 0x18
|
||||||
|
sysFSCREATE = 0x100
|
||||||
|
sysFSDELETE = 0x200
|
||||||
|
sysFSDELETESELF = 0x400
|
||||||
|
sysFSMODIFY = 0x2
|
||||||
|
sysFSMOVE = 0xc0
|
||||||
|
sysFSMOVEDFROM = 0x40
|
||||||
|
sysFSMOVEDTO = 0x80
|
||||||
|
sysFSMOVESELF = 0x800
|
||||||
|
|
||||||
|
// Special events
|
||||||
|
sysFSIGNORED = 0x8000
|
||||||
|
sysFSQOVERFLOW = 0x4000
|
||||||
|
)
|
||||||
|
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
opAddWatch = iota
|
||||||
|
opRemoveWatch
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
provisional uint64 = 1 << (32 + iota)
|
||||||
|
)
|
||||||
|
|
||||||
|
type input struct {
|
||||||
|
op int
|
||||||
|
path string
|
||||||
|
flags uint32
|
||||||
|
reply chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
type inode struct {
|
||||||
|
handle syscall.Handle
|
||||||
|
volume uint32
|
||||||
|
index uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
ov syscall.Overlapped
|
||||||
|
ino *inode // i-number
|
||||||
|
path string // Directory path
|
||||||
|
mask uint64 // Directory itself is being watched with these notify flags
|
||||||
|
names map[string]uint64 // Map of names being watched and their notify flags
|
||||||
|
rename string // Remembers the old name while renaming a file
|
||||||
|
buf [4096]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexMap map[uint64]*watch
|
||||||
|
type watchMap map[uint32]indexMap
|
||||||
|
|
||||||
|
func (w *Watcher) wakeupReader() error {
|
||||||
|
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||||
|
if e != nil {
|
||||||
|
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDir(pathname string) (dir string, err error) {
|
||||||
|
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||||
|
if e != nil {
|
||||||
|
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||||
|
}
|
||||||
|
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||||
|
dir = pathname
|
||||||
|
} else {
|
||||||
|
dir, _ = filepath.Split(pathname)
|
||||||
|
dir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIno(path string) (ino *inode, err error) {
|
||||||
|
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||||
|
syscall.FILE_LIST_DIRECTORY,
|
||||||
|
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||||
|
nil, syscall.OPEN_EXISTING,
|
||||||
|
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateFile", e)
|
||||||
|
}
|
||||||
|
var fi syscall.ByHandleFileInformation
|
||||||
|
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||||
|
syscall.CloseHandle(h)
|
||||||
|
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||||
|
}
|
||||||
|
ino = &inode{
|
||||||
|
handle: h,
|
||||||
|
volume: fi.VolumeSerialNumber,
|
||||||
|
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||||
|
}
|
||||||
|
return ino, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) get(ino *inode) *watch {
|
||||||
|
if i := m[ino.volume]; i != nil {
|
||||||
|
return i[ino.index]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) set(ino *inode, watch *watch) {
|
||||||
|
i := m[ino.volume]
|
||||||
|
if i == nil {
|
||||||
|
i = make(indexMap)
|
||||||
|
m[ino.volume] = i
|
||||||
|
}
|
||||||
|
i[ino.index] = watch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watchEntry := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watchEntry == nil {
|
||||||
|
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
watchEntry = &watch{
|
||||||
|
ino: ino,
|
||||||
|
path: dir,
|
||||||
|
names: make(map[string]uint64),
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches.set(ino, watchEntry)
|
||||||
|
w.mu.Unlock()
|
||||||
|
flags |= provisional
|
||||||
|
} else {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask |= flags
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||||
|
}
|
||||||
|
if err = w.startRead(watchEntry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask &= ^provisional
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) remWatch(pathname string) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watch := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watch == nil {
|
||||||
|
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||||
|
watch.mask = 0
|
||||||
|
} else {
|
||||||
|
name := filepath.Base(pathname)
|
||||||
|
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
return w.startRead(watch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) deleteWatch(watch *watch) {
|
||||||
|
for name, mask := range watch.names {
|
||||||
|
if mask&provisional == 0 {
|
||||||
|
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||||
|
}
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if watch.mask != 0 {
|
||||||
|
if watch.mask&provisional == 0 {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||||
|
}
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) startRead(watch *watch) error {
|
||||||
|
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
}
|
||||||
|
mask := toWindowsFlags(watch.mask)
|
||||||
|
for _, m := range watch.names {
|
||||||
|
mask |= toWindowsFlags(m)
|
||||||
|
}
|
||||||
|
if mask == 0 {
|
||||||
|
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||||
|
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||||
|
if e != nil {
|
||||||
|
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||||
|
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||||
|
// Watched directory was probably removed
|
||||||
|
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||||
|
if watch.mask&sysFSONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the I/O completion port, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel.
|
||||||
|
// Entry point to the I/O thread.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
n, key uint32
|
||||||
|
ov *syscall.Overlapped
|
||||||
|
)
|
||||||
|
runtime.LockOSThread()
|
||||||
|
|
||||||
|
for {
|
||||||
|
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||||
|
watch := (*watch)(unsafe.Pointer(ov))
|
||||||
|
|
||||||
|
if watch == nil {
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.mu.Lock()
|
||||||
|
var indexes []indexMap
|
||||||
|
for _, index := range w.watches {
|
||||||
|
indexes = append(indexes, index)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, index := range indexes {
|
||||||
|
for _, watch := range index {
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if e := syscall.CloseHandle(w.port); e != nil {
|
||||||
|
err = os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
ch <- err
|
||||||
|
return
|
||||||
|
case in := <-w.input:
|
||||||
|
switch in.op {
|
||||||
|
case opAddWatch:
|
||||||
|
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||||
|
case opRemoveWatch:
|
||||||
|
in.reply <- w.remWatch(in.path)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch e {
|
||||||
|
case syscall.ERROR_MORE_DATA:
|
||||||
|
if watch == nil {
|
||||||
|
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||||
|
} else {
|
||||||
|
// The i/o succeeded but the buffer is full.
|
||||||
|
// In theory we should be building up a full packet.
|
||||||
|
// In practice we can get away with just carrying on.
|
||||||
|
n = uint32(unsafe.Sizeof(watch.buf))
|
||||||
|
}
|
||||||
|
case syscall.ERROR_ACCESS_DENIED:
|
||||||
|
// Watched directory was probably removed
|
||||||
|
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
continue
|
||||||
|
case syscall.ERROR_OPERATION_ABORTED:
|
||||||
|
// CancelIo was called on this handle
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||||
|
continue
|
||||||
|
case nil:
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
for {
|
||||||
|
if n == 0 {
|
||||||
|
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||||
|
w.Errors <- errors.New("short read in readEvents()")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||||
|
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||||
|
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||||
|
fullname := filepath.Join(watch.path, name)
|
||||||
|
|
||||||
|
var mask uint64
|
||||||
|
switch raw.Action {
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
mask = sysFSDELETESELF
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
mask = sysFSMODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
watch.rename = name
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
if watch.names[watch.rename] != 0 {
|
||||||
|
watch.names[name] |= watch.names[watch.rename]
|
||||||
|
delete(watch.names, watch.rename)
|
||||||
|
mask = sysFSMOVESELF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sendNameEvent := func() {
|
||||||
|
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||||
|
if watch.names[name]&sysFSONESHOT != 0 {
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||||
|
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||||
|
if watch.mask&sysFSONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
fullname = filepath.Join(watch.path, watch.rename)
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
if raw.NextEntryOffset == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset += raw.NextEntryOffset
|
||||||
|
|
||||||
|
// Error!
|
||||||
|
if offset >= n {
|
||||||
|
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.startRead(watch); err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||||
|
if mask == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
event := newEvent(name, uint32(mask))
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.quit <- ch
|
||||||
|
case w.Events <- event:
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func toWindowsFlags(mask uint64) uint32 {
|
||||||
|
var m uint32
|
||||||
|
if mask&sysFSACCESS != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||||
|
}
|
||||||
|
if mask&sysFSMODIFY != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||||
|
}
|
||||||
|
if mask&sysFSATTRIB != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||||
|
}
|
||||||
|
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func toFSnotifyFlags(action uint32) uint64 {
|
||||||
|
switch action {
|
||||||
|
case syscall.FILE_ACTION_ADDED:
|
||||||
|
return sysFSCREATE
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
return sysFSDELETE
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
return sysFSMODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
return sysFSMOVEDFROM
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
return sysFSMOVEDTO
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,20 @@
# OSX leaves these everywhere on SMB shares
._*

# Eclipse files
.classpath
.project
.settings/**

# Emacs save files
*~

# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist

# Go test binaries
*.test
@@ -0,0 +1,7 @@
language: go
go:
  - 1.3
  - 1.4
script:
  - go test
  - go build
@@ -0,0 +1,50 @@
The MIT License (MIT)

Copyright (c) 2014 Sam Ghods

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,121 @@
# YAML marshaling and unmarshaling support for Go

[Build Status](https://travis-ci.org/ghodss/yaml)

## Introduction

A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.

In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).

## Compatibility

This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).

## Caveats

**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:

```
BAD:
    exampleKey: !!binary gIGC

GOOD:
    exampleKey: gIGC
... and decode the base64 data in your code.
```

**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well, since you can't unmarshal map keys anyway (struct fields can't be keys).
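
As an illustration, here is a minimal sketch of the kind of document that triggers this error; the two-line YAML (a mapping whose key is itself a mapping) is a made-up example, not taken from the library's documentation:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// A YAML mapping whose key is itself a mapping ("? {a: 1}" uses YAML's
	// explicit-key syntax). JSON object keys must be strings, so YAMLToJSON
	// is expected to return an error for this document.
	y := []byte("? {a: 1}\n: value\n")

	if _, err := yaml.YAMLToJSON(y); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println("unexpectedly converted without error")
}
```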

## Installation and usage

To install, run:

```
$ go get github.com/ghodss/yaml
```

And import using:

```
import "github.com/ghodss/yaml"
```

Usage is very similar to the JSON library:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Person struct {
	Name string `json:"name"` // Affects YAML field names too.
	Age  int    `json:"age"`
}

func main() {
	// Marshal a Person struct to YAML.
	p := Person{"John", 30}
	y, err := yaml.Marshal(p)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	age: 30
	name: John
	*/

	// Unmarshal the YAML back into a Person struct.
	var p2 Person
	err = yaml.Unmarshal(y, &p2)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(p2)
	/* Output:
	{John 30}
	*/
}
```

`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j := []byte(`{"name": "John", "age": 30}`)
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	name: John
	age: 30
	*/
	j2, err := yaml.YAMLToJSON(y)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j2))
	/* Output:
	{"age":30,"name":"John"}
	*/
}
```
@@ -0,0 +1,501 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding"
|
||||||
|
"encoding/json"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// indirect walks down v allocating pointers as needed,
|
||||||
|
// until it gets to a non-pointer.
|
||||||
|
// if it encounters an Unmarshaler, indirect stops and returns that.
|
||||||
|
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
|
||||||
|
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
|
||||||
|
// If v is a named type and is addressable,
|
||||||
|
// start with its address, so that if the type has pointer methods,
|
||||||
|
// we find them.
|
||||||
|
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||||
|
v = v.Addr()
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
// Load value from interface, but only if the result will be
|
||||||
|
// usefully addressable.
|
||||||
|
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||||
|
e := v.Elem()
|
||||||
|
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
|
||||||
|
v = e
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Kind() != reflect.Ptr {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if v.IsNil() {
|
||||||
|
if v.CanSet() {
|
||||||
|
v.Set(reflect.New(v.Type().Elem()))
|
||||||
|
} else {
|
||||||
|
v = reflect.New(v.Type().Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v.Type().NumMethod() > 0 {
|
||||||
|
if u, ok := v.Interface().(json.Unmarshaler); ok {
|
||||||
|
return u, nil, reflect.Value{}
|
||||||
|
}
|
||||||
|
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||||
|
return nil, u, reflect.Value{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
return nil, nil, v
|
||||||
|
}
|
||||||
|
|
||||||
|
// A field represents a single field found in a struct.
|
||||||
|
type field struct {
|
||||||
|
name string
|
||||||
|
nameBytes []byte // []byte(name)
|
||||||
|
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
|
||||||
|
|
||||||
|
tag bool
|
||||||
|
index []int
|
||||||
|
typ reflect.Type
|
||||||
|
omitEmpty bool
|
||||||
|
quoted bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func fillField(f field) field {
|
||||||
|
f.nameBytes = []byte(f.name)
|
||||||
|
f.equalFold = foldFunc(f.nameBytes)
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// byName sorts field by name, breaking ties with depth,
|
||||||
|
// then breaking ties with "name came from json tag", then
|
||||||
|
// breaking ties with index sequence.
|
||||||
|
type byName []field
|
||||||
|
|
||||||
|
func (x byName) Len() int { return len(x) }
|
||||||
|
|
||||||
|
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
|
func (x byName) Less(i, j int) bool {
|
||||||
|
if x[i].name != x[j].name {
|
||||||
|
return x[i].name < x[j].name
|
||||||
|
}
|
||||||
|
if len(x[i].index) != len(x[j].index) {
|
||||||
|
return len(x[i].index) < len(x[j].index)
|
||||||
|
}
|
||||||
|
if x[i].tag != x[j].tag {
|
||||||
|
return x[i].tag
|
||||||
|
}
|
||||||
|
return byIndex(x).Less(i, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
// byIndex sorts field by index sequence.
|
||||||
|
type byIndex []field
|
||||||
|
|
||||||
|
func (x byIndex) Len() int { return len(x) }
|
||||||
|
|
||||||
|
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
|
func (x byIndex) Less(i, j int) bool {
|
||||||
|
for k, xik := range x[i].index {
|
||||||
|
if k >= len(x[j].index) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if xik != x[j].index[k] {
|
||||||
|
return xik < x[j].index[k]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(x[i].index) < len(x[j].index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||||
|
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||||
|
// and then any reachable anonymous structs.
|
||||||
|
func typeFields(t reflect.Type) []field {
|
||||||
|
// Anonymous fields to explore at the current level and the next.
|
||||||
|
current := []field{}
|
||||||
|
next := []field{{typ: t}}
|
||||||
|
|
||||||
|
// Count of queued names for current level and the next.
|
||||||
|
count := map[reflect.Type]int{}
|
||||||
|
nextCount := map[reflect.Type]int{}
|
||||||
|
|
||||||
|
// Types already visited at an earlier level.
|
||||||
|
visited := map[reflect.Type]bool{}
|
||||||
|
|
||||||
|
// Fields found.
|
||||||
|
var fields []field
|
||||||
|
|
||||||
|
for len(next) > 0 {
|
||||||
|
current, next = next, current[:0]
|
||||||
|
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||||
|
|
||||||
|
for _, f := range current {
|
||||||
|
if visited[f.typ] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
visited[f.typ] = true
|
||||||
|
|
||||||
|
// Scan f.typ for fields to include.
|
||||||
|
for i := 0; i < f.typ.NumField(); i++ {
|
||||||
|
sf := f.typ.Field(i)
|
||||||
|
if sf.PkgPath != "" { // unexported
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    opts.Contains("string"),
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
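
// Illustrative sketch (editorial addition, not part of the vendored file):
// how dominantField resolves an embedded-field collision. Given
//
//	type Inner struct{ Name string }
//	type Outer struct {
//		Inner
//		Name string
//	}
//
// both candidate fields are called "Name", but Outer.Name has the shorter
// index path, so the slice is truncated to it and it wins; the promoted
// Inner.Name is hidden, mirroring Go's own shadowing rules. If two fields
// at the same depth carried the same JSON tag name, the function would
// instead return ok == false and both would be dropped.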

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		switch {
		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
		default:
			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
				return false
			}
		}
	}
	return true
}

const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'
	smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//  * S maps to s and to U+017F 'ſ' Latin small letter long s
//  * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}
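
// Illustrative sketch (editorial addition, not part of the vendored file):
// which equivalence function foldFunc picks for a few representative keys.
//
//	foldFunc([]byte("Label"))  // simpleLetterEqualFold: ASCII letters only
//	foldFunc([]byte("num_1"))  // asciiEqualFold: ASCII with non-letters ('_', '1')
//	foldFunc([]byte("kind"))   // equalFoldRight: contains 'k', which also folds to U+212A
//	foldFunc([]byte("naïve"))  // bytes.EqualFold: key contains non-ASCII UTF-8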

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]

	}
	if len(t) > 0 {
		return false
	}
	return true
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, sb := range s {
		tb := t[i]
		if sb == tb {
			continue
		}
		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
			if sb&caseMask != tb&caseMask {
				return false
			}
		} else {
			return false
		}
	}
	return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, b := range s {
		if b&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
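
// Illustrative sketch (editorial addition, not part of the vendored file):
// splitting a struct tag and querying its options with the helpers above.
//
//	name, opts := parseTag("count,omitempty,string")
//	// name == "count"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("string")    == true
//	// opts.Contains("omit")      == false (whole options only, no substrings)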

@@ -0,0 +1,277 @@
package yaml

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"

	"gopkg.in/yaml.v2"
)

// Marshals the object into JSON then converts JSON to YAML and returns the
// YAML.
func Marshal(o interface{}) ([]byte, error) {
	j, err := json.Marshal(o)
	if err != nil {
		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
	}

	y, err := JSONToYAML(j)
	if err != nil {
		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
	}

	return y, nil
}

// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
	vo := reflect.ValueOf(o)
	j, err := yamlToJSON(y, &vo)
	if err != nil {
		return fmt.Errorf("error converting YAML to JSON: %v", err)
	}

	err = json.Unmarshal(j, o)
	if err != nil {
		return fmt.Errorf("error unmarshaling JSON: %v", err)
	}

	return nil
}
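
// Illustrative sketch (editorial addition, not part of the vendored file):
// a minimal round trip through this package from a caller outside it. The
// Config type and the exact YAML layout shown are assumptions; the layout
// ultimately depends on go-yaml's marshaler.
//
//	type Config struct {
//		Name  string `json:"name"`
//		Ports []int  `json:"ports"`
//	}
//
//	out, err := yaml.Marshal(Config{Name: "web", Ports: []int{80, 443}})
//	// out is roughly "name: web\nports:\n- 80\n- 443\n", err == nil
//
//	var c Config
//	err = yaml.Unmarshal([]byte("name: db\nports: [5432]\n"), &c)
//	// c == Config{Name: "db", Ports: []int{5432}}
//
// Note that the struct tags are json tags, because the data is routed
// through encoding/json as the two functions above describe.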

// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
	// Convert the JSON to an object.
	var jsonObj interface{}
	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
	// Go JSON library doesn't try to pick the right number type (int, float,
	// etc.) when unmarshalling to interface{}, it just picks float64
	// universally. go-yaml does go through the effort of picking the right
	// number type, so we can preserve number type throughout this process.
	err := yaml.Unmarshal(j, &jsonObj)
	if err != nil {
		return nil, err
	}

	// Marshal this object into YAML.
	return yaml.Marshal(jsonObj)
}

// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
//   in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
//   use binary data with this library, encode the data as base64 as usual but do
//   not use the !!binary tag in your YAML. This will ensure the original base64
//   encoded data makes it all the way through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
	return yamlToJSON(y, nil)
}
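
// Illustrative sketch (editorial addition, not part of the vendored file):
// converting between the two encodings directly, again from a caller
// outside this package.
//
//	j, err := yaml.YAMLToJSON([]byte("1: a\nb:\n- 2\n- true\n"))
//	// j == []byte(`{"1":"a","b":[2,true]}`), err == nil; note the int key
//	// becomes the JSON string "1".
//
//	y, err := yaml.JSONToYAML(j)
//	// y is the equivalent YAML document; numbers keep their original type
//	// because the JSON is re-parsed with go-yaml rather than encoding/json,
//	// as explained in JSONToYAML above.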

func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
	// Convert the YAML to an object.
	var yamlObj interface{}
	err := yaml.Unmarshal(y, &yamlObj)
	if err != nil {
		return nil, err
	}

	// YAML objects are not completely compatible with JSON objects (e.g. you
	// can have non-string keys in YAML). So, convert the YAML-compatible object
	// to a JSON-compatible object, failing with an error if irrecoverable
	// incompatibilities happen along the way.
	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
	if err != nil {
		return nil, err
	}

	// Convert this object to JSON and return the data.
	return json.Marshal(jsonObj)
}

func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
	var err error

	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
	// interface). We pass decodingNull as false because we're not actually
	// decoding into the value, we're just checking if the ultimate target is a
	// string.
	if jsonTarget != nil {
		ju, tu, pv := indirect(*jsonTarget, false)
		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
		// to decode into a string.
		if ju != nil || tu != nil {
			jsonTarget = nil
		} else {
			jsonTarget = &pv
		}
	}

	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
	// if so, coerce. Else return normal.
	// If yamlObj is a map or array, find the field that each key is
	// unmarshaling to, and when you recurse pass the reflect.Value for that
	// field back into this function.
	switch typedYAMLObj := yamlObj.(type) {
	case map[interface{}]interface{}:
		// JSON does not support arbitrary keys in a map, so we must convert
		// these keys to strings.
		//
		// From my reading of go-yaml v2 (specifically the resolve function),
		// keys can only have the types string, int, int64, float64, binary
		// (unsupported), or null (unsupported).
		strMap := make(map[string]interface{})
		for k, v := range typedYAMLObj {
			// Resolve the key to a string first.
			var keyString string
			switch typedKey := k.(type) {
			case string:
				keyString = typedKey
			case int:
				keyString = strconv.Itoa(typedKey)
			case int64:
				// go-yaml will only return an int64 as a key if the system
				// architecture is 32-bit and the key's value is between 32-bit
				// and 64-bit. Otherwise the key type will simply be int.
				keyString = strconv.FormatInt(typedKey, 10)
			case float64:
				// Stolen from go-yaml to use the same conversion to string as
				// the go-yaml library uses to convert float to string when
				// Marshaling.
				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
				switch s {
				case "+Inf":
					s = ".inf"
				case "-Inf":
					s = "-.inf"
				case "NaN":
					s = ".nan"
				}
				keyString = s
			case bool:
				if typedKey {
					keyString = "true"
				} else {
					keyString = "false"
				}
			default:
				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
					reflect.TypeOf(k), k, v)
			}

			// jsonTarget should be a struct or a map. If it's a struct, find
			// the field it's going to map to and pass its reflect.Value. If
			// it's a map, find the element type of the map and pass the
			// reflect.Value created from that type. If it's neither, just pass
			// nil - JSON conversion will error for us if it's a real issue.
			if jsonTarget != nil {
				t := *jsonTarget
				if t.Kind() == reflect.Struct {
					keyBytes := []byte(keyString)
					// Find the field that the JSON library would use.
					var f *field
					fields := cachedTypeFields(t.Type())
					for i := range fields {
						ff := &fields[i]
						if bytes.Equal(ff.nameBytes, keyBytes) {
							f = ff
							break
						}
						// Do case-insensitive comparison.
						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
							f = ff
						}
					}
					if f != nil {
						// Find the reflect.Value of the most preferential
						// struct field.
						jtf := t.Field(f.index[0])
						strMap[keyString], err = convertToJSONableObject(v, &jtf)
						if err != nil {
							return nil, err
						}
						continue
					}
				} else if t.Kind() == reflect.Map {
					// Create a zero value of the map's element type to use as
					// the JSON target.
					jtv := reflect.Zero(t.Type().Elem())
					strMap[keyString], err = convertToJSONableObject(v, &jtv)
					if err != nil {
						return nil, err
					}
					continue
				}
			}
			strMap[keyString], err = convertToJSONableObject(v, nil)
			if err != nil {
				return nil, err
			}
		}
		return strMap, nil
	case []interface{}:
		// We need to recurse into arrays in case there are any
		// map[interface{}]interface{}'s inside and to convert any
		// numbers to strings.

		// If jsonTarget is a slice (which it really should be), find the
		// thing it's going to map to. If it's not a slice, just pass nil
		// - JSON conversion will error for us if it's a real issue.
		var jsonSliceElemValue *reflect.Value
		if jsonTarget != nil {
			t := *jsonTarget
			if t.Kind() == reflect.Slice {
				// By default slices point to nil, but we need a reflect.Value
				// pointing to a value of the slice type, so we create one here.
				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
				jsonSliceElemValue = &ev
			}
		}

		// Make and use a new array.
		arr := make([]interface{}, len(typedYAMLObj))
		for i, v := range typedYAMLObj {
			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
			if err != nil {
				return nil, err
			}
		}
		return arr, nil
	default:
		// If the target type is a string and the YAML type is a number,
		// convert the YAML type to a string.
		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
			// Based on my reading of go-yaml, it may return int, int64,
			// float64, or uint64.
			var s string
			switch typedVal := typedYAMLObj.(type) {
			case int:
				s = strconv.FormatInt(int64(typedVal), 10)
			case int64:
				s = strconv.FormatInt(typedVal, 10)
			case float64:
				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
			case uint64:
				s = strconv.FormatUint(typedVal, 10)
			case bool:
				if typedVal {
					s = "true"
				} else {
					s = "false"
				}
			}
			if len(s) > 0 {
				yamlObj = interface{}(s)
			}
		}
		return yamlObj, nil
	}

	return nil, nil
}

@@ -0,0 +1,287 @@
package yaml

import (
	"fmt"
	"math"
	"reflect"
	"strconv"
	"testing"
)

type MarshalTest struct {
	A string
	B int64
	// Would like to test float64, but it's not supported in go-yaml.
	// (See https://github.com/go-yaml/yaml/issues/83.)
	C float32
}

func TestMarshal(t *testing.T) {
	f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32)
	s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32}
	e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String))

	y, err := Marshal(s)
	if err != nil {
		t.Errorf("error marshaling YAML: %v", err)
	}

	if !reflect.DeepEqual(y, e) {
		t.Errorf("marshal YAML was unsuccessful, expected: %#v, got: %#v",
			string(e), string(y))
	}
}

type UnmarshalString struct {
	A    string
	True string
}

type UnmarshalStringMap struct {
	A map[string]string
}

type UnmarshalNestedString struct {
	A NestedString
}

type NestedString struct {
	A string
}

type UnmarshalSlice struct {
	A []NestedSlice
}

type NestedSlice struct {
	B string
	C *string
}

func TestUnmarshal(t *testing.T) {
	y := []byte("a: 1")
	s1 := UnmarshalString{}
	e1 := UnmarshalString{A: "1"}
	unmarshal(t, y, &s1, &e1)

	y = []byte("a: true")
	s1 = UnmarshalString{}
	e1 = UnmarshalString{A: "true"}
	unmarshal(t, y, &s1, &e1)

	y = []byte("true: 1")
	s1 = UnmarshalString{}
	e1 = UnmarshalString{True: "1"}
	unmarshal(t, y, &s1, &e1)

	y = []byte("a:\n  a: 1")
	s2 := UnmarshalNestedString{}
	e2 := UnmarshalNestedString{NestedString{"1"}}
	unmarshal(t, y, &s2, &e2)

	y = []byte("a:\n  - b: abc\n    c: def\n  - b: 123\n    c: 456\n")
	s3 := UnmarshalSlice{}
	e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}}
	unmarshal(t, y, &s3, &e3)

	y = []byte("a:\n  b: 1")
	s4 := UnmarshalStringMap{}
	e4 := UnmarshalStringMap{map[string]string{"b": "1"}}
	unmarshal(t, y, &s4, &e4)

	y = []byte(`
a:
  name: TestA
b:
  name: TestB
`)
	type NamedThing struct {
		Name string `json:"name"`
	}
	s5 := map[string]*NamedThing{}
	e5 := map[string]*NamedThing{
		"a": &NamedThing{Name: "TestA"},
		"b": &NamedThing{Name: "TestB"},
	}
	unmarshal(t, y, &s5, &e5)
}

func unmarshal(t *testing.T, y []byte, s, e interface{}) {
	err := Unmarshal(y, s)
	if err != nil {
		t.Errorf("error unmarshaling YAML: %v", err)
	}

	if !reflect.DeepEqual(s, e) {
		t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v",
			e, s)
	}
}

type Case struct {
	input  string
	output string
	// By default we test that reversing the output == input. But if there is a
	// difference in the reversed output, you can optionally specify it here.
	reverse *string
}

type RunType int

const (
	RunTypeJSONToYAML RunType = iota
	RunTypeYAMLToJSON
)

func TestJSONToYAML(t *testing.T) {
	cases := []Case{
		{
			`{"t":"a"}`,
			"t: a\n",
			nil,
		}, {
			`{"t":null}`,
			"t: null\n",
			nil,
		},
	}

	runCases(t, RunTypeJSONToYAML, cases)
}

func TestYAMLToJSON(t *testing.T) {
	cases := []Case{
		{
			"t: a\n",
			`{"t":"a"}`,
			nil,
		}, {
			"t: \n",
			`{"t":null}`,
			strPtr("t: null\n"),
		}, {
			"t: null\n",
			`{"t":null}`,
			nil,
		}, {
			"1: a\n",
			`{"1":"a"}`,
			strPtr("\"1\": a\n"),
		}, {
			"1000000000000000000000000000000000000: a\n",
			`{"1e+36":"a"}`,
			strPtr("\"1e+36\": a\n"),
		}, {
			"1e+36: a\n",
			`{"1e+36":"a"}`,
			strPtr("\"1e+36\": a\n"),
		}, {
			"\"1e+36\": a\n",
			`{"1e+36":"a"}`,
			nil,
		}, {
			"\"1.2\": a\n",
			`{"1.2":"a"}`,
			nil,
		}, {
			"- t: a\n",
			`[{"t":"a"}]`,
			nil,
		}, {
			"- t: a\n" +
				"- t:\n" +
				"    b: 1\n" +
				"    c: 2\n",
			`[{"t":"a"},{"t":{"b":1,"c":2}}]`,
			nil,
		}, {
			`[{t: a}, {t: {b: 1, c: 2}}]`,
			`[{"t":"a"},{"t":{"b":1,"c":2}}]`,
			strPtr("- t: a\n" +
				"- t:\n" +
				"    b: 1\n" +
				"    c: 2\n"),
		}, {
			"- t: \n",
			`[{"t":null}]`,
			strPtr("- t: null\n"),
		}, {
			"- t: null\n",
			`[{"t":null}]`,
			nil,
		},
	}

	// Cases that should produce errors.
	_ = []Case{
		{
			"~: a",
			`{"null":"a"}`,
			nil,
		}, {
			"a: !!binary gIGC\n",
			"{\"a\":\"\x80\x81\x82\"}",
			nil,
		},
	}

	runCases(t, RunTypeYAMLToJSON, cases)
}

func runCases(t *testing.T, runType RunType, cases []Case) {
	var f func([]byte) ([]byte, error)
	var invF func([]byte) ([]byte, error)
	var msg string
	var invMsg string
	if runType == RunTypeJSONToYAML {
		f = JSONToYAML
		invF = YAMLToJSON
		msg = "JSON to YAML"
		invMsg = "YAML back to JSON"
	} else {
		f = YAMLToJSON
		invF = JSONToYAML
		msg = "YAML to JSON"
		invMsg = "JSON back to YAML"
	}

	for _, c := range cases {
		// Convert the string.
		t.Logf("converting %s\n", c.input)
		output, err := f([]byte(c.input))
		if err != nil {
			t.Errorf("Failed to convert %s, input: `%s`, err: %v", msg, c.input, err)
		}

		// Check it against the expected output.
		if string(output) != c.output {
			t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
				msg, c.input, c.output, string(output))
		}

		// Set the string that we will compare the reversed output to.
		reverse := c.input
		// If a special reverse string was specified, use that instead.
		if c.reverse != nil {
			reverse = *c.reverse
		}

		// Reverse the output.
		input, err := invF(output)
		if err != nil {
			t.Errorf("Failed to convert %s, input: `%s`, err: %v", invMsg, string(output), err)
		}

		// Check the reverse is equal to the input (or to *c.reverse).
		if string(input) != reverse {
			t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
				invMsg, string(output), reverse, string(input))
		}
	}

}

// To be able to easily fill in the *Case.reverse string above.
func strPtr(s string) *string {
	return &s
}

@@ -0,0 +1,16 @@
.DS_Store
*.[568ao]
*.ao
*.so
*.pyc
._*
.nfs.*
[568a].out
*~
*.orig
core
_obj
_test
_testmain.go
protoc-gen-go/testdata/multi/*.pb.go
_conformance/_conformance

@@ -0,0 +1,18 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x

install:
- go get -v -d -t github.com/golang/protobuf/...
- curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip
- unzip /tmp/protoc.zip -d $HOME/protoc

env:
- PATH=$HOME/protoc/bin:$PATH

script:
- make all test

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

@@ -0,0 +1,31 @@
Go support for Protocol Buffers - Google's data interchange format

Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,40 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|

# Includable Makefile to add a rule for generating .pb.go files from .proto files
# (Google protocol buffer descriptions).
# Typical use if myproto.proto is a file in package mypackage in this directory:
#
#	include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf

%.pb.go: %.proto
	protoc --go_out=. $<

@ -0,0 +1,55 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|

all: install

install:
	go install ./proto ./jsonpb ./ptypes
	go install ./protoc-gen-go

test:
	go test ./proto ./jsonpb ./ptypes
	make -C protoc-gen-go/testdata test

clean:
	go clean ./...

nuke:
	go clean -i ./...

regenerate:
	make -C protoc-gen-go/descriptor regenerate
	make -C protoc-gen-go/plugin regenerate
	make -C protoc-gen-go/testdata regenerate
	make -C proto/testdata regenerate
	make -C jsonpb/jsonpb_test_proto regenerate
	make -C _conformance regenerate

@@ -0,0 +1,244 @@
# Go support for Protocol Buffers

[](https://travis-ci.org/golang/protobuf)
[](https://godoc.org/github.com/golang/protobuf)

Google's data interchange format.
Copyright 2010 The Go Authors.
https://github.com/golang/protobuf

This package and the code it generates require at least Go 1.4.

This software implements Go bindings for protocol buffers. For
information about protocol buffers themselves, see
	https://developers.google.com/protocol-buffers/

## Installation ##

To use this software, you must:
- Install the standard C++ implementation of protocol buffers from
  https://developers.google.com/protocol-buffers/
- Of course, install the Go compiler and tools from
  https://golang.org/
  See
  https://golang.org/doc/install
  for details or, if you are using gccgo, follow the instructions at
  https://golang.org/doc/install/gccgo
- Grab the code from the repository and install the proto package.
  The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
  The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
  defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
  compiler, protoc, to find it.

This software has two parts: a 'protocol compiler plugin' that
generates Go source files that, once compiled, can access and manage
protocol buffers; and a library that implements run-time support for
encoding (marshaling), decoding (unmarshaling), and accessing protocol
buffers.

There is support for gRPC in Go using protocol buffers.
See the note at the bottom of this file for details.

There are no insertion points in the plugin.


## Using protocol buffers with Go ##

Once the software is installed, there are two steps to using it.
First you must compile the protocol buffer definitions and then import
them, with the support library, into your program.

To compile the protocol buffer definition, run protoc with the --go_out
parameter set to the directory you want to output the Go code to.

	protoc --go_out=. *.proto

The generated files will be suffixed .pb.go. See the Test code below
for an example using such a file.


The package comment for the proto library contains text describing
the interface provided in Go for protocol buffers. Here is an edited
version.

==========

The proto package converts data structures to and from the
wire format of protocol buffers. It works in concert with the
Go source code generated for .proto files by the protocol compiler.

A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:

  - Names are turned from camel_case to CamelCase for export.
  - There are no methods on v to set fields; just treat
    them as structure fields.
  - There are getters that return a field's value if set,
    and return the field's default value if unset.
    The getters work even if the receiver is a nil message.
  - The zero value for a struct is its correct initialization state.
    All desired fields must be set before marshaling.
  - A Reset() method will restore a protobuf struct to its zero state.
  - Non-repeated fields are pointers to the values; nil means unset.
    That is, optional or required field int32 f becomes F *int32.
  - Repeated fields are slices.
  - Helper functions are available to aid the setting of fields.
    Helpers for getting values are superseded by the
    GetFoo methods and their use is deprecated.
        msg.Foo = proto.String("hello") // set field
  - Constants are defined to hold the default values of all fields that
    have them. They have the form Default_StructName_FieldName.
    Because the getter methods handle defaulted values,
    direct use of these constants should be rare.
  - Enums are given type names and maps from names to values.
    Enum values are prefixed with the enum's type name. Enum types have
    a String method, and an Enum method to assist in message construction.
  - Nested groups and enums have type names prefixed with the name of
    the surrounding message type.
  - Extensions are given descriptor names that start with E_,
    followed by an underscore-delimited list of the nested messages
    that contain it (if any) followed by the CamelCased name of the
    extension field itself. HasExtension, ClearExtension, GetExtension
    and SetExtension are functions for manipulating extensions.
  - Oneof field sets are given a single field in their message,
    with distinguished wrapper types for each possible field value.
  - Marshal and Unmarshal are functions to encode and decode the wire format.

When the .proto file specifies `syntax="proto3"`, there are some differences:

  - Non-repeated fields of non-message type are values instead of pointers.
  - Enum types do not get an Enum method.

Consider file test.proto, containing

```proto
syntax = "proto2";
package example;

enum FOO { X = 17; };

message Test {
  required string label = 1;
  optional int32 type = 2 [default=77];
  repeated int64 reps = 3;
  optional group OptionalGroup = 4 {
    required string RequiredField = 5;
  }
}
```

To create and play with a Test object from the example package,

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	"path/to/example"
)

func main() {
	test := &example.Test{
		Label: proto.String("hello"),
		Type:  proto.Int32(17),
		Reps:  []int64{1, 2, 3},
		Optionalgroup: &example.Test_OptionalGroup{
			RequiredField: proto.String("good bye"),
		},
	}
	data, err := proto.Marshal(test)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	newTest := &example.Test{}
	err = proto.Unmarshal(data, newTest)
	if err != nil {
		log.Fatal("unmarshaling error: ", err)
	}
	// Now test and newTest contain the same data.
	if test.GetLabel() != newTest.GetLabel() {
		log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
	}
	// etc.
}
```

## Parameters ##

To pass extra parameters to the plugin, use a comma-separated
parameter list separated from the output directory by a colon:

	protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto

- `import_prefix=xxx` - a prefix that is added onto the beginning of
  all imports. Useful for things like generating protos in a
  subdirectory, or regenerating vendored protobufs in-place.
- `import_path=foo/bar` - used as the package if no input files
  declare `go_package`. If it contains slashes, everything up to the
  rightmost slash is ignored.
- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
  load. The only plugin in this repo is `grpc`.
- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
  associated with Go package quux/shme. This is subject to the
  import_prefix parameter.

## gRPC Support ##

If a proto file specifies RPC services, protoc-gen-go can be instructed to
generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
the --go_out argument to protoc:

	protoc --go_out=plugins=grpc:. *.proto

## Compatibility ##

The library and the generated code are expected to be stable over time.
However, we reserve the right to make breaking changes without notice for the
following reasons:

- Security. A security issue in the specification or implementation may come to
  light whose resolution requires breaking compatibility. We reserve the right
  to address such security issues.
- Unspecified behavior. There are some aspects of the Protocol Buffers
  specification that are undefined. Programs that depend on such unspecified
  behavior may break in future releases.
- Specification errors or changes. If it becomes necessary to address an
  inconsistency, incompleteness, or change in the Protocol Buffers
  specification, resolving the issue could affect the meaning or legality of
  existing programs. We reserve the right to address such issues, including
  updating the implementations.
- Bugs. If the library has a bug that violates the specification, a program
  that depends on the buggy behavior may break if the bug is fixed. We reserve
  the right to fix such bugs.
- Adding methods or fields to generated structs. These may conflict with field
  names that already exist in a schema, causing applications to break. When the
  code generator encounters a field in the schema that would collide with a
  generated field or method name, the code generator will append an underscore
  to the generated field or method name.
- Adding, removing, or changing methods or fields in generated structs that
  start with `XXX`. These parts of the generated code are exported out of
  necessity, but should not be considered part of the public API.
- Adding, removing, or changing unexported symbols in generated code.

Any breaking changes outside of these will be announced 6 months in advance to
protobuf@googlegroups.com.

You should, whenever possible, use generated code created by the `protoc-gen-go`
tool built at the same commit as the `proto` package. The `proto` package
declares package-level constants in the form `ProtoPackageIsVersionX`.
Application code and generated code may depend on one of these constants to
ensure that compilation will fail if the available version of the proto library
is too old. Whenever we make a change to the generated code that requires newer
library support, in the same commit we will increment the version number of the
generated code and declare a new package-level constant whose name incorporates
the latest version number. Removing a compatibility constant is considered a
breaking change and would be subject to the announcement policy stated above.

The `protoc-gen-go/generator` package exposes a plugin interface,
which is used by the gRPC code generation. This interface is not
supported and is subject to incompatible changes without notice.

@ -0,0 +1,33 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|

regenerate:
	protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto

@ -0,0 +1,161 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|

// conformance implements the conformance test subprocess protocol as
// documented in conformance.proto.
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"

	pb "github.com/golang/protobuf/_conformance/conformance_proto"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
)

func main() {
	var sizeBuf [4]byte
	inbuf := make([]byte, 0, 4096)
	outbuf := proto.NewBuffer(nil)
	for {
		if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF {
			break
		} else if err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
			os.Exit(1)
		}
		size := binary.LittleEndian.Uint32(sizeBuf[:])
		if int(size) > cap(inbuf) {
			inbuf = make([]byte, size)
		}
		inbuf = inbuf[:size]
		if _, err := io.ReadFull(os.Stdin, inbuf); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
			os.Exit(1)
		}

		req := new(pb.ConformanceRequest)
		if err := proto.Unmarshal(inbuf, req); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: parse request:", err)
			os.Exit(1)
		}
		res := handle(req)

		if err := outbuf.Marshal(res); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err)
			os.Exit(1)
		}
		binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes())))
		if _, err := os.Stdout.Write(sizeBuf[:]); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
			os.Exit(1)
		}
		if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
			os.Exit(1)
		}
		outbuf.Reset()
	}
}
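
// Illustrative sketch (editorial addition, not part of the vendored file):
// how a test harness on the other side of the pipe would frame one request
// for the loop above, i.e. a 4-byte little-endian length prefix followed by
// the serialized ConformanceRequest. The writeFrame helper is hypothetical.
//
//	func writeFrame(w io.Writer, msg proto.Message) error {
//		body, err := proto.Marshal(msg)
//		if err != nil {
//			return err
//		}
//		var size [4]byte
//		binary.LittleEndian.PutUint32(size[:], uint32(len(body)))
//		if _, err := w.Write(size[:]); err != nil {
//			return err
//		}
//		_, err = w.Write(body)
//		return err
//	}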
|
||||||
|
|
||||||
|
var jsonMarshaler = jsonpb.Marshaler{
|
||||||
|
OrigName: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse {
|
||||||
|
var err error
|
||||||
|
var msg pb.TestAllTypes
|
||||||
|
switch p := req.Payload.(type) {
|
||||||
|
case *pb.ConformanceRequest_ProtobufPayload:
|
||||||
|
err = proto.Unmarshal(p.ProtobufPayload, &msg)
|
||||||
|
case *pb.ConformanceRequest_JsonPayload:
|
||||||
|
err = jsonpb.UnmarshalString(p.JsonPayload, &msg)
|
||||||
|
if err != nil && err.Error() == "unmarshaling Any not supported yet" {
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_Skipped{
|
||||||
|
Skipped: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_RuntimeError{
|
||||||
|
RuntimeError: "unknown request payload type",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_ParseError{
|
||||||
|
ParseError: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch req.RequestedOutputFormat {
|
||||||
|
case pb.WireFormat_PROTOBUF:
|
||||||
|
p, err := proto.Marshal(&msg)
|
||||||
|
if err != nil {
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_SerializeError{
|
||||||
|
SerializeError: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_ProtobufPayload{
|
||||||
|
ProtobufPayload: p,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case pb.WireFormat_JSON:
|
||||||
|
p, err := jsonMarshaler.MarshalToString(&msg)
|
||||||
|
if err != nil {
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_SerializeError{
|
||||||
|
SerializeError: err.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_JsonPayload{
|
||||||
|
JsonPayload: p,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return &pb.ConformanceResponse{
|
||||||
|
Result: &pb.ConformanceResponse_RuntimeError{
|
||||||
|
RuntimeError: "unknown output format",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
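The loop above is the testee's half of the conformance pipe protocol: every message is framed as a 4-byte little-endian length followed by the serialized proto. As a rough sketch of the runner's half under the same framing (the writeRequest and readResponse helpers, and the package name, are illustrative only and not part of the vendored code):

package runner

import (
	"encoding/binary"
	"io"

	"github.com/golang/protobuf/proto"

	pb "github.com/golang/protobuf/_conformance/conformance_proto"
)

// writeRequest frames one ConformanceRequest the way the loop above reads it:
// a 4-byte little-endian length, then the marshaled message bytes.
func writeRequest(w io.Writer, req *pb.ConformanceRequest) error {
	body, err := proto.Marshal(req)
	if err != nil {
		return err
	}
	var sizeBuf [4]byte
	binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(body)))
	if _, err := w.Write(sizeBuf[:]); err != nil {
		return err
	}
	_, err = w.Write(body)
	return err
}

// readResponse reads back one length-prefixed ConformanceResponse.
func readResponse(r io.Reader) (*pb.ConformanceResponse, error) {
	var sizeBuf [4]byte
	if _, err := io.ReadFull(r, sizeBuf[:]); err != nil {
		return nil, err
	}
	body := make([]byte, binary.LittleEndian.Uint32(sizeBuf[:]))
	if _, err := io.ReadFull(r, body); err != nil {
		return nil, err
	}
	res := new(pb.ConformanceResponse)
	return res, proto.Unmarshal(body, res)
}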
1885 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go generated vendored Normal file
File diff suppressed because it is too large
285 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto generated vendored Normal file
@@ -0,0 +1,285 @@
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
package conformance;
|
||||||
|
option java_package = "com.google.protobuf.conformance";
|
||||||
|
|
||||||
|
import "google/protobuf/any.proto";
|
||||||
|
import "google/protobuf/duration.proto";
|
||||||
|
import "google/protobuf/field_mask.proto";
|
||||||
|
import "google/protobuf/struct.proto";
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
import "google/protobuf/wrappers.proto";
|
||||||
|
|
||||||
|
// This defines the conformance testing protocol. This protocol exists between
|
||||||
|
// the conformance test suite itself and the code being tested. For each test,
|
||||||
|
// the suite will send a ConformanceRequest message and expect a
|
||||||
|
// ConformanceResponse message.
|
||||||
|
//
|
||||||
|
// You can run the tests in either of two ways:
|
||||||
|
//
|
||||||
|
// 1. in-process (using the interface in conformance_test.h).
|
||||||
|
//
|
||||||
|
// 2. as a sub-process communicating over a pipe. Information about how to
|
||||||
|
// do this is in conformance_test_runner.cc.
|
||||||
|
//
|
||||||
|
// Pros/cons of the two approaches:
|
||||||
|
//
|
||||||
|
// - running as a sub-process is much simpler for languages other than C/C++.
|
||||||
|
//
|
||||||
|
// - running as a sub-process may be more tricky in unusual environments like
|
||||||
|
// iOS apps, where fork/stdin/stdout are not available.
|
||||||
|
|
||||||
|
enum WireFormat {
|
||||||
|
UNSPECIFIED = 0;
|
||||||
|
PROTOBUF = 1;
|
||||||
|
JSON = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Represents a single test case's input. The testee should:
|
||||||
|
//
|
||||||
|
// 1. parse this proto (which should always succeed)
|
||||||
|
// 2. parse the protobuf or JSON payload in "payload" (which may fail)
|
||||||
|
// 3. if the parse succeeded, serialize the message in the requested format.
|
||||||
|
message ConformanceRequest {
|
||||||
|
// The payload (whether protobuf or JSON) is always for a TestAllTypes proto
|
||||||
|
// (see below).
|
||||||
|
oneof payload {
|
||||||
|
bytes protobuf_payload = 1;
|
||||||
|
string json_payload = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Which format should the testee serialize its message to?
|
||||||
|
WireFormat requested_output_format = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Represents a single test case's output.
|
||||||
|
message ConformanceResponse {
|
||||||
|
oneof result {
|
||||||
|
// This string should be set to indicate parsing failed. The string can
|
||||||
|
// provide more information about the parse error if it is available.
|
||||||
|
//
|
||||||
|
// Setting this string does not necessarily mean the testee failed the
|
||||||
|
// test. Some of the test cases are intentionally invalid input.
|
||||||
|
string parse_error = 1;
|
||||||
|
|
||||||
|
// If the input was successfully parsed but errors occurred when
|
||||||
|
// serializing it to the requested output format, set the error message in
|
||||||
|
// this field.
|
||||||
|
string serialize_error = 6;
|
||||||
|
|
||||||
|
// This should be set if some other error occurred. This will always
|
||||||
|
// indicate that the test failed. The string can provide more information
|
||||||
|
// about the failure.
|
||||||
|
string runtime_error = 2;
|
||||||
|
|
||||||
|
// If the input was successfully parsed and the requested output was
|
||||||
|
// protobuf, serialize it to protobuf and set it in this field.
|
||||||
|
bytes protobuf_payload = 3;
|
||||||
|
|
||||||
|
// If the input was successfully parsed and the requested output was JSON,
|
||||||
|
// serialize to JSON and set it in this field.
|
||||||
|
string json_payload = 4;
|
||||||
|
|
||||||
|
// For when the testee skipped the test, likely because a certain feature
|
||||||
|
// wasn't supported, like JSON input/output.
|
||||||
|
string skipped = 5;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This proto includes every type of field in both singular and repeated
|
||||||
|
// forms.
|
||||||
|
message TestAllTypes {
|
||||||
|
message NestedMessage {
|
||||||
|
int32 a = 1;
|
||||||
|
TestAllTypes corecursive = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum NestedEnum {
|
||||||
|
FOO = 0;
|
||||||
|
BAR = 1;
|
||||||
|
BAZ = 2;
|
||||||
|
NEG = -1; // Intentionally negative.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Singular
|
||||||
|
int32 optional_int32 = 1;
|
||||||
|
int64 optional_int64 = 2;
|
||||||
|
uint32 optional_uint32 = 3;
|
||||||
|
uint64 optional_uint64 = 4;
|
||||||
|
sint32 optional_sint32 = 5;
|
||||||
|
sint64 optional_sint64 = 6;
|
||||||
|
fixed32 optional_fixed32 = 7;
|
||||||
|
fixed64 optional_fixed64 = 8;
|
||||||
|
sfixed32 optional_sfixed32 = 9;
|
||||||
|
sfixed64 optional_sfixed64 = 10;
|
||||||
|
float optional_float = 11;
|
||||||
|
double optional_double = 12;
|
||||||
|
bool optional_bool = 13;
|
||||||
|
string optional_string = 14;
|
||||||
|
bytes optional_bytes = 15;
|
||||||
|
|
||||||
|
NestedMessage optional_nested_message = 18;
|
||||||
|
ForeignMessage optional_foreign_message = 19;
|
||||||
|
|
||||||
|
NestedEnum optional_nested_enum = 21;
|
||||||
|
ForeignEnum optional_foreign_enum = 22;
|
||||||
|
|
||||||
|
string optional_string_piece = 24 [ctype=STRING_PIECE];
|
||||||
|
string optional_cord = 25 [ctype=CORD];
|
||||||
|
|
||||||
|
TestAllTypes recursive_message = 27;
|
||||||
|
|
||||||
|
// Repeated
|
||||||
|
repeated int32 repeated_int32 = 31;
|
||||||
|
repeated int64 repeated_int64 = 32;
|
||||||
|
repeated uint32 repeated_uint32 = 33;
|
||||||
|
repeated uint64 repeated_uint64 = 34;
|
||||||
|
repeated sint32 repeated_sint32 = 35;
|
||||||
|
repeated sint64 repeated_sint64 = 36;
|
||||||
|
repeated fixed32 repeated_fixed32 = 37;
|
||||||
|
repeated fixed64 repeated_fixed64 = 38;
|
||||||
|
repeated sfixed32 repeated_sfixed32 = 39;
|
||||||
|
repeated sfixed64 repeated_sfixed64 = 40;
|
||||||
|
repeated float repeated_float = 41;
|
||||||
|
repeated double repeated_double = 42;
|
||||||
|
repeated bool repeated_bool = 43;
|
||||||
|
repeated string repeated_string = 44;
|
||||||
|
repeated bytes repeated_bytes = 45;
|
||||||
|
|
||||||
|
repeated NestedMessage repeated_nested_message = 48;
|
||||||
|
repeated ForeignMessage repeated_foreign_message = 49;
|
||||||
|
|
||||||
|
repeated NestedEnum repeated_nested_enum = 51;
|
||||||
|
repeated ForeignEnum repeated_foreign_enum = 52;
|
||||||
|
|
||||||
|
repeated string repeated_string_piece = 54 [ctype=STRING_PIECE];
|
||||||
|
repeated string repeated_cord = 55 [ctype=CORD];
|
||||||
|
|
||||||
|
// Map
|
||||||
|
map < int32, int32> map_int32_int32 = 56;
|
||||||
|
map < int64, int64> map_int64_int64 = 57;
|
||||||
|
map < uint32, uint32> map_uint32_uint32 = 58;
|
||||||
|
map < uint64, uint64> map_uint64_uint64 = 59;
|
||||||
|
map < sint32, sint32> map_sint32_sint32 = 60;
|
||||||
|
map < sint64, sint64> map_sint64_sint64 = 61;
|
||||||
|
map < fixed32, fixed32> map_fixed32_fixed32 = 62;
|
||||||
|
map < fixed64, fixed64> map_fixed64_fixed64 = 63;
|
||||||
|
map <sfixed32, sfixed32> map_sfixed32_sfixed32 = 64;
|
||||||
|
map <sfixed64, sfixed64> map_sfixed64_sfixed64 = 65;
|
||||||
|
map < int32, float> map_int32_float = 66;
|
||||||
|
map < int32, double> map_int32_double = 67;
|
||||||
|
map < bool, bool> map_bool_bool = 68;
|
||||||
|
map < string, string> map_string_string = 69;
|
||||||
|
map < string, bytes> map_string_bytes = 70;
|
||||||
|
map < string, NestedMessage> map_string_nested_message = 71;
|
||||||
|
map < string, ForeignMessage> map_string_foreign_message = 72;
|
||||||
|
map < string, NestedEnum> map_string_nested_enum = 73;
|
||||||
|
map < string, ForeignEnum> map_string_foreign_enum = 74;
|
||||||
|
|
||||||
|
oneof oneof_field {
|
||||||
|
uint32 oneof_uint32 = 111;
|
||||||
|
NestedMessage oneof_nested_message = 112;
|
||||||
|
string oneof_string = 113;
|
||||||
|
bytes oneof_bytes = 114;
|
||||||
|
bool oneof_bool = 115;
|
||||||
|
uint64 oneof_uint64 = 116;
|
||||||
|
float oneof_float = 117;
|
||||||
|
double oneof_double = 118;
|
||||||
|
NestedEnum oneof_enum = 119;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Well-known types
|
||||||
|
google.protobuf.BoolValue optional_bool_wrapper = 201;
|
||||||
|
google.protobuf.Int32Value optional_int32_wrapper = 202;
|
||||||
|
google.protobuf.Int64Value optional_int64_wrapper = 203;
|
||||||
|
google.protobuf.UInt32Value optional_uint32_wrapper = 204;
|
||||||
|
google.protobuf.UInt64Value optional_uint64_wrapper = 205;
|
||||||
|
google.protobuf.FloatValue optional_float_wrapper = 206;
|
||||||
|
google.protobuf.DoubleValue optional_double_wrapper = 207;
|
||||||
|
google.protobuf.StringValue optional_string_wrapper = 208;
|
||||||
|
google.protobuf.BytesValue optional_bytes_wrapper = 209;
|
||||||
|
|
||||||
|
repeated google.protobuf.BoolValue repeated_bool_wrapper = 211;
|
||||||
|
repeated google.protobuf.Int32Value repeated_int32_wrapper = 212;
|
||||||
|
repeated google.protobuf.Int64Value repeated_int64_wrapper = 213;
|
||||||
|
repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214;
|
||||||
|
repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215;
|
||||||
|
repeated google.protobuf.FloatValue repeated_float_wrapper = 216;
|
||||||
|
repeated google.protobuf.DoubleValue repeated_double_wrapper = 217;
|
||||||
|
repeated google.protobuf.StringValue repeated_string_wrapper = 218;
|
||||||
|
repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219;
|
||||||
|
|
||||||
|
google.protobuf.Duration optional_duration = 301;
|
||||||
|
google.protobuf.Timestamp optional_timestamp = 302;
|
||||||
|
google.protobuf.FieldMask optional_field_mask = 303;
|
||||||
|
google.protobuf.Struct optional_struct = 304;
|
||||||
|
google.protobuf.Any optional_any = 305;
|
||||||
|
google.protobuf.Value optional_value = 306;
|
||||||
|
|
||||||
|
repeated google.protobuf.Duration repeated_duration = 311;
|
||||||
|
repeated google.protobuf.Timestamp repeated_timestamp = 312;
|
||||||
|
repeated google.protobuf.FieldMask repeated_fieldmask = 313;
|
||||||
|
repeated google.protobuf.Struct repeated_struct = 324;
|
||||||
|
repeated google.protobuf.Any repeated_any = 315;
|
||||||
|
repeated google.protobuf.Value repeated_value = 316;
|
||||||
|
|
||||||
|
// Test field-name-to-JSON-name convention.
|
||||||
|
// (protobuf says names can be any valid C/C++ identifier.)
|
||||||
|
int32 fieldname1 = 401;
|
||||||
|
int32 field_name2 = 402;
|
||||||
|
int32 _field_name3 = 403;
|
||||||
|
int32 field__name4_ = 404;
|
||||||
|
int32 field0name5 = 405;
|
||||||
|
int32 field_0_name6 = 406;
|
||||||
|
int32 fieldName7 = 407;
|
||||||
|
int32 FieldName8 = 408;
|
||||||
|
int32 field_Name9 = 409;
|
||||||
|
int32 Field_Name10 = 410;
|
||||||
|
int32 FIELD_NAME11 = 411;
|
||||||
|
int32 FIELD_name12 = 412;
|
||||||
|
int32 __field_name13 = 413;
|
||||||
|
int32 __Field_name14 = 414;
|
||||||
|
int32 field__name15 = 415;
|
||||||
|
int32 field__Name16 = 416;
|
||||||
|
int32 field_name17__ = 417;
|
||||||
|
int32 Field_name18__ = 418;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ForeignMessage {
|
||||||
|
int32 c = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum ForeignEnum {
|
||||||
|
FOREIGN_FOO = 0;
|
||||||
|
FOREIGN_BAR = 1;
|
||||||
|
FOREIGN_BAZ = 2;
|
||||||
|
}
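Tying the definitions above back to the handle function earlier in this diff: a single test case is a ConformanceRequest with one payload variant set and a requested output format, answered by exactly one result variant on ConformanceResponse. A minimal sketch, assuming the generated Go package for this .proto is imported as pb, as in conformance.go (exampleRequest itself is a made-up helper for illustration, living alongside conformance.go in the same package):

// exampleRequest is not part of the vendored code; it only illustrates the
// request/response oneofs defined above.
func exampleRequest() {
	req := &pb.ConformanceRequest{
		Payload:               &pb.ConformanceRequest_JsonPayload{JsonPayload: `{"optionalInt32": 1}`},
		RequestedOutputFormat: pb.WireFormat_PROTOBUF,
	}
	switch r := handle(req).Result.(type) {
	case *pb.ConformanceResponse_ProtobufPayload:
		fmt.Printf("serialized %d bytes\n", len(r.ProtobufPayload))
	case *pb.ConformanceResponse_ParseError:
		fmt.Println("parse error:", r.ParseError)
	case *pb.ConformanceResponse_Skipped:
		fmt.Println("skipped:", r.Skipped)
	case *pb.ConformanceResponse_RuntimeError:
		fmt.Println("runtime error:", r.RuntimeError)
	}
}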
@@ -0,0 +1,93 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package descriptor provides functions for obtaining protocol buffer
// descriptors for generated Go types.
//
// These functions cannot go in package proto because they depend on the
// generated protobuf descriptor messages, which themselves depend on proto.
package descriptor

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
	}

	fd := new(protobuf.FileDescriptorProto)
	if err := proto.Unmarshal(b, fd); err != nil {
		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
	}

	return fd, nil
}

// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
	proto.Message
	Descriptor() ([]byte, []int)
}

// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
	gz, path := msg.Descriptor()
	fd, err := extractFile(gz)
	if err != nil {
		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
	}

	md = fd.MessageType[path[0]]
	for _, i := range path[1:] {
		md = md.NestedType[i]
	}
	return fd, md
}
@@ -0,0 +1,32 @@
package descriptor_test

import (
	"fmt"
	"testing"

	"github.com/golang/protobuf/descriptor"
	tpb "github.com/golang/protobuf/proto/testdata"
	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func TestMessage(t *testing.T) {
	var msg *protobuf.DescriptorProto
	fd, md := descriptor.ForMessage(msg)
	if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want {
		t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want)
	}
	if name, want := md.GetName(), "DescriptorProto"; name != want {
		t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want)
	}
}

func Example_Options() {
	var msg *tpb.MyMessageSet
	_, md := descriptor.ForMessage(msg)
	if md.GetOptions().GetMessageSetWireFormat() {
		fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName())
	}

	// Output:
	// MyMessageSet uses option message_set_wire_format.
}
File diff suppressed because it is too large
@@ -0,0 +1,897 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
|
||||||
|
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
"github.com/golang/protobuf/ptypes"
|
||||||
|
anypb "github.com/golang/protobuf/ptypes/any"
|
||||||
|
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||||
|
stpb "github.com/golang/protobuf/ptypes/struct"
|
||||||
|
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||||
|
wpb "github.com/golang/protobuf/ptypes/wrappers"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
marshaler = Marshaler{}
|
||||||
|
|
||||||
|
marshalerAllOptions = Marshaler{
|
||||||
|
Indent: " ",
|
||||||
|
}
|
||||||
|
|
||||||
|
simpleObject = &pb.Simple{
|
||||||
|
OInt32: proto.Int32(-32),
|
||||||
|
OInt64: proto.Int64(-6400000000),
|
||||||
|
OUint32: proto.Uint32(32),
|
||||||
|
OUint64: proto.Uint64(6400000000),
|
||||||
|
OSint32: proto.Int32(-13),
|
||||||
|
OSint64: proto.Int64(-2600000000),
|
||||||
|
OFloat: proto.Float32(3.14),
|
||||||
|
ODouble: proto.Float64(6.02214179e23),
|
||||||
|
OBool: proto.Bool(true),
|
||||||
|
OString: proto.String("hello \"there\""),
|
||||||
|
OBytes: []byte("beep boop"),
|
||||||
|
}
|
||||||
|
|
||||||
|
simpleObjectJSON = `{` +
|
||||||
|
`"oBool":true,` +
|
||||||
|
`"oInt32":-32,` +
|
||||||
|
`"oInt64":"-6400000000",` +
|
||||||
|
`"oUint32":32,` +
|
||||||
|
`"oUint64":"6400000000",` +
|
||||||
|
`"oSint32":-13,` +
|
||||||
|
`"oSint64":"-2600000000",` +
|
||||||
|
`"oFloat":3.14,` +
|
||||||
|
`"oDouble":6.02214179e+23,` +
|
||||||
|
`"oString":"hello \"there\"",` +
|
||||||
|
`"oBytes":"YmVlcCBib29w"` +
|
||||||
|
`}`
|
||||||
|
|
||||||
|
simpleObjectPrettyJSON = `{
|
||||||
|
"oBool": true,
|
||||||
|
"oInt32": -32,
|
||||||
|
"oInt64": "-6400000000",
|
||||||
|
"oUint32": 32,
|
||||||
|
"oUint64": "6400000000",
|
||||||
|
"oSint32": -13,
|
||||||
|
"oSint64": "-2600000000",
|
||||||
|
"oFloat": 3.14,
|
||||||
|
"oDouble": 6.02214179e+23,
|
||||||
|
"oString": "hello \"there\"",
|
||||||
|
"oBytes": "YmVlcCBib29w"
|
||||||
|
}`
|
||||||
|
|
||||||
|
repeatsObject = &pb.Repeats{
|
||||||
|
RBool: []bool{true, false, true},
|
||||||
|
RInt32: []int32{-3, -4, -5},
|
||||||
|
RInt64: []int64{-123456789, -987654321},
|
||||||
|
RUint32: []uint32{1, 2, 3},
|
||||||
|
RUint64: []uint64{6789012345, 3456789012},
|
||||||
|
RSint32: []int32{-1, -2, -3},
|
||||||
|
RSint64: []int64{-6789012345, -3456789012},
|
||||||
|
RFloat: []float32{3.14, 6.28},
|
||||||
|
RDouble: []float64{299792458 * 1e20, 6.62606957e-34},
|
||||||
|
RString: []string{"happy", "days"},
|
||||||
|
RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")},
|
||||||
|
}
|
||||||
|
|
||||||
|
repeatsObjectJSON = `{` +
|
||||||
|
`"rBool":[true,false,true],` +
|
||||||
|
`"rInt32":[-3,-4,-5],` +
|
||||||
|
`"rInt64":["-123456789","-987654321"],` +
|
||||||
|
`"rUint32":[1,2,3],` +
|
||||||
|
`"rUint64":["6789012345","3456789012"],` +
|
||||||
|
`"rSint32":[-1,-2,-3],` +
|
||||||
|
`"rSint64":["-6789012345","-3456789012"],` +
|
||||||
|
`"rFloat":[3.14,6.28],` +
|
||||||
|
`"rDouble":[2.99792458e+28,6.62606957e-34],` +
|
||||||
|
`"rString":["happy","days"],` +
|
||||||
|
`"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` +
|
||||||
|
`}`
|
||||||
|
|
||||||
|
repeatsObjectPrettyJSON = `{
|
||||||
|
"rBool": [
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
true
|
||||||
|
],
|
||||||
|
"rInt32": [
|
||||||
|
-3,
|
||||||
|
-4,
|
||||||
|
-5
|
||||||
|
],
|
||||||
|
"rInt64": [
|
||||||
|
"-123456789",
|
||||||
|
"-987654321"
|
||||||
|
],
|
||||||
|
"rUint32": [
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"rUint64": [
|
||||||
|
"6789012345",
|
||||||
|
"3456789012"
|
||||||
|
],
|
||||||
|
"rSint32": [
|
||||||
|
-1,
|
||||||
|
-2,
|
||||||
|
-3
|
||||||
|
],
|
||||||
|
"rSint64": [
|
||||||
|
"-6789012345",
|
||||||
|
"-3456789012"
|
||||||
|
],
|
||||||
|
"rFloat": [
|
||||||
|
3.14,
|
||||||
|
6.28
|
||||||
|
],
|
||||||
|
"rDouble": [
|
||||||
|
2.99792458e+28,
|
||||||
|
6.62606957e-34
|
||||||
|
],
|
||||||
|
"rString": [
|
||||||
|
"happy",
|
||||||
|
"days"
|
||||||
|
],
|
||||||
|
"rBytes": [
|
||||||
|
"c2tpdHRsZXM=",
|
||||||
|
"bSZtJ3M="
|
||||||
|
]
|
||||||
|
}`
|
||||||
|
|
||||||
|
innerSimple = &pb.Simple{OInt32: proto.Int32(-32)}
|
||||||
|
innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)}
|
||||||
|
innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}}
|
||||||
|
innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}}
|
||||||
|
complexObject = &pb.Widget{
|
||||||
|
Color: pb.Widget_GREEN.Enum(),
|
||||||
|
RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE},
|
||||||
|
Simple: innerSimple,
|
||||||
|
RSimple: []*pb.Simple{innerSimple, innerSimple2},
|
||||||
|
Repeats: innerRepeats,
|
||||||
|
RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2},
|
||||||
|
}
|
||||||
|
|
||||||
|
complexObjectJSON = `{"color":"GREEN",` +
|
||||||
|
`"rColor":["RED","GREEN","BLUE"],` +
|
||||||
|
`"simple":{"oInt32":-32},` +
|
||||||
|
`"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` +
|
||||||
|
`"repeats":{"rString":["roses","red"]},` +
|
||||||
|
`"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` +
|
||||||
|
`}`
|
||||||
|
|
||||||
|
complexObjectPrettyJSON = `{
|
||||||
|
"color": "GREEN",
|
||||||
|
"rColor": [
|
||||||
|
"RED",
|
||||||
|
"GREEN",
|
||||||
|
"BLUE"
|
||||||
|
],
|
||||||
|
"simple": {
|
||||||
|
"oInt32": -32
|
||||||
|
},
|
||||||
|
"rSimple": [
|
||||||
|
{
|
||||||
|
"oInt32": -32
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"oInt64": "25"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"repeats": {
|
||||||
|
"rString": [
|
||||||
|
"roses",
|
||||||
|
"red"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"rRepeats": [
|
||||||
|
{
|
||||||
|
"rString": [
|
||||||
|
"roses",
|
||||||
|
"red"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rString": [
|
||||||
|
"violets",
|
||||||
|
"blue"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}`
|
||||||
|
|
||||||
|
colorPrettyJSON = `{
|
||||||
|
"color": 2
|
||||||
|
}`
|
||||||
|
|
||||||
|
colorListPrettyJSON = `{
|
||||||
|
"color": 1000,
|
||||||
|
"rColor": [
|
||||||
|
"RED"
|
||||||
|
]
|
||||||
|
}`
|
||||||
|
|
||||||
|
nummyPrettyJSON = `{
|
||||||
|
"nummy": {
|
||||||
|
"1": 2,
|
||||||
|
"3": 4
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
|
objjyPrettyJSON = `{
|
||||||
|
"objjy": {
|
||||||
|
"1": {
|
||||||
|
"dub": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
realNumber = &pb.Real{Value: proto.Float64(3.14159265359)}
|
||||||
|
realNumberName = "Pi"
|
||||||
|
complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)}
|
||||||
|
realNumberJSON = `{` +
|
||||||
|
`"value":3.14159265359,` +
|
||||||
|
`"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` +
|
||||||
|
`"[jsonpb.name]":"Pi"` +
|
||||||
|
`}`
|
||||||
|
|
||||||
|
anySimple = &pb.KnownTypes{
|
||||||
|
An: &anypb.Any{
|
||||||
|
TypeUrl: "something.example.com/jsonpb.Simple",
|
||||||
|
Value: []byte{
|
||||||
|
// &pb.Simple{OBool:true}
|
||||||
|
1 << 3, 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}`
|
||||||
|
anySimplePrettyJSON = `{
|
||||||
|
"an": {
|
||||||
|
"@type": "something.example.com/jsonpb.Simple",
|
||||||
|
"oBool": true
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
|
anyWellKnown = &pb.KnownTypes{
|
||||||
|
An: &anypb.Any{
|
||||||
|
TypeUrl: "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
Value: []byte{
|
||||||
|
// &durpb.Duration{Seconds: 1, Nanos: 212000000 }
|
||||||
|
1 << 3, 1, // seconds
|
||||||
|
2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}`
|
||||||
|
anyWellKnownPrettyJSON = `{
|
||||||
|
"an": {
|
||||||
|
"@type": "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
"value": "1.212s"
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
|
nonFinites = &pb.NonFinites{
|
||||||
|
FNan: proto.Float32(float32(math.NaN())),
|
||||||
|
FPinf: proto.Float32(float32(math.Inf(1))),
|
||||||
|
FNinf: proto.Float32(float32(math.Inf(-1))),
|
||||||
|
DNan: proto.Float64(float64(math.NaN())),
|
||||||
|
DPinf: proto.Float64(float64(math.Inf(1))),
|
||||||
|
DNinf: proto.Float64(float64(math.Inf(-1))),
|
||||||
|
}
|
||||||
|
nonFinitesJSON = `{` +
|
||||||
|
`"fNan":"NaN",` +
|
||||||
|
`"fPinf":"Infinity",` +
|
||||||
|
`"fNinf":"-Infinity",` +
|
||||||
|
`"dNan":"NaN",` +
|
||||||
|
`"dPinf":"Infinity",` +
|
||||||
|
`"dNinf":"-Infinity"` +
|
||||||
|
`}`
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var marshalingTests = []struct {
|
||||||
|
desc string
|
||||||
|
marshaler Marshaler
|
||||||
|
pb proto.Message
|
||||||
|
json string
|
||||||
|
}{
|
||||||
|
{"simple flat object", marshaler, simpleObject, simpleObjectJSON},
|
||||||
|
{"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON},
|
||||||
|
{"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON},
|
||||||
|
{"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON},
|
||||||
|
{"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON},
|
||||||
|
{"nested message/enum flat object", marshaler, complexObject, complexObjectJSON},
|
||||||
|
{"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON},
|
||||||
|
{"enum-string flat object", Marshaler{},
|
||||||
|
&pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`},
|
||||||
|
{"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "},
|
||||||
|
&pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON},
|
||||||
|
{"unknown enum value object", marshalerAllOptions,
|
||||||
|
&pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON},
|
||||||
|
{"repeated proto3 enum", Marshaler{},
|
||||||
|
&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
|
||||||
|
proto3pb.Message_PUNS,
|
||||||
|
proto3pb.Message_SLAPSTICK,
|
||||||
|
}},
|
||||||
|
`{"rFunny":["PUNS","SLAPSTICK"]}`},
|
||||||
|
{"repeated proto3 enum as int", Marshaler{EnumsAsInts: true},
|
||||||
|
&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
|
||||||
|
proto3pb.Message_PUNS,
|
||||||
|
proto3pb.Message_SLAPSTICK,
|
||||||
|
}},
|
||||||
|
`{"rFunny":[1,2]}`},
|
||||||
|
{"empty value", marshaler, &pb.Simple3{}, `{}`},
|
||||||
|
{"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`},
|
||||||
|
{"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`},
|
||||||
|
{"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`},
|
||||||
|
{"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`},
|
||||||
|
{"map<int64, int32>", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`},
|
||||||
|
{"map<int64, int32>", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON},
|
||||||
|
{"map<string, string>", marshaler,
|
||||||
|
&pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}},
|
||||||
|
`{"strry":{"\"one\"":"two","three":"four"}}`},
|
||||||
|
{"map<int32, Object>", marshaler,
|
||||||
|
&pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`},
|
||||||
|
{"map<int32, Object>", marshalerAllOptions,
|
||||||
|
&pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, objjyPrettyJSON},
|
||||||
|
{"map<int64, string>", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}},
|
||||||
|
`{"buggy":{"1234":"yup"}}`},
|
||||||
|
{"map<bool, bool>", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`},
|
||||||
|
// TODO: This is broken.
|
||||||
|
//{"map<string, enum>", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`},
|
||||||
|
{"map<string, enum as int>", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`},
|
||||||
|
{"map<int32, bool>", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`},
|
||||||
|
{"map<int64, bool>", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`},
|
||||||
|
{"map<uint32, bool>", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`},
|
||||||
|
{"map<uint64, bool>", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`},
|
||||||
|
{"proto2 map<int64, string>", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}},
|
||||||
|
`{"mInt64Str":{"213":"cat"}}`},
|
||||||
|
{"proto2 map<bool, Object>", marshaler,
|
||||||
|
&pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: {OInt32: proto.Int32(1)}}},
|
||||||
|
`{"mBoolSimple":{"true":{"oInt32":1}}}`},
|
||||||
|
{"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`},
|
||||||
|
{"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`},
|
||||||
|
{"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)},
|
||||||
|
`{"o_int32":4}`},
|
||||||
|
{"proto2 extension", marshaler, realNumber, realNumberJSON},
|
||||||
|
{"Any with message", marshaler, anySimple, anySimpleJSON},
|
||||||
|
{"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON},
|
||||||
|
{"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON},
|
||||||
|
{"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON},
|
||||||
|
{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`},
|
||||||
|
{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`},
|
||||||
|
{"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{
|
||||||
|
Fields: map[string]*stpb.Value{
|
||||||
|
"one": {Kind: &stpb.Value_StringValue{"loneliest number"}},
|
||||||
|
"two": {Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}},
|
||||||
|
},
|
||||||
|
}}, `{"st":{"one":"loneliest number","two":null}}`},
|
||||||
|
{"empty ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{}}, `{"lv":[]}`},
|
||||||
|
{"basic ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_StringValue{"x"}},
|
||||||
|
{Kind: &stpb.Value_NullValue{}},
|
||||||
|
{Kind: &stpb.Value_NumberValue{3}},
|
||||||
|
{Kind: &stpb.Value_BoolValue{true}},
|
||||||
|
}}}, `{"lv":["x",null,3,true]}`},
|
||||||
|
{"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`},
|
||||||
|
{"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`},
|
||||||
|
{"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`},
|
||||||
|
{"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`},
|
||||||
|
{"list of lists Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{
|
||||||
|
Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_StringValue{"x"}},
|
||||||
|
{Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}},
|
||||||
|
}}},
|
||||||
|
{Kind: &stpb.Value_StringValue{"z"}},
|
||||||
|
},
|
||||||
|
}}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}}, `{"val":["x",[["y"],"z"]]}`},
|
||||||
|
|
||||||
|
{"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`},
|
||||||
|
{"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`},
|
||||||
|
{"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`},
|
||||||
|
{"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`},
|
||||||
|
{"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`},
|
||||||
|
{"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`},
|
||||||
|
{"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`},
|
||||||
|
{"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`},
|
||||||
|
{"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshaling(t *testing.T) {
|
||||||
|
for _, tt := range marshalingTests {
|
||||||
|
json, err := tt.marshaler.MarshalToString(tt.pb)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: marshaling error: %v", tt.desc, err)
|
||||||
|
} else if tt.json != json {
|
||||||
|
t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
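For reference outside the table-driven test, the same Marshaler options can be exercised directly. The sketch below is not part of the vendored tests; it only restates outputs that already appear in the table above ({"o_int32":4} for OrigName, {"dub":0} for EmitDefaults):

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"

	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	msg := &pb.Simple{OInt32: proto.Int32(4)}

	// Default naming follows the JSON mapping of the field name.
	s, _ := (&jsonpb.Marshaler{}).MarshalToString(msg)
	fmt.Println(s) // {"oInt32":4}

	// OrigName keeps the .proto field names, as in the "force orig_name" case.
	s, _ = (&jsonpb.Marshaler{OrigName: true}).MarshalToString(msg)
	fmt.Println(s) // {"o_int32":4}

	// EmitDefaults writes zero values too; the table expects {"dub":0} here.
	s, _ = (&jsonpb.Marshaler{EmitDefaults: true}).MarshalToString(&pb.Simple3{})
	fmt.Println(s) // {"dub":0}
}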
|
||||||
|
|
||||||
|
func TestMarshalJSONPBMarshaler(t *testing.T) {
|
||||||
|
rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }`
|
||||||
|
msg := dynamicMessage{rawJson: rawJson}
|
||||||
|
str, err := new(Marshaler).MarshalToString(&msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err)
|
||||||
|
}
|
||||||
|
if str != rawJson {
|
||||||
|
t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, rawJson)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalAnyJSONPBMarshaler(t *testing.T) {
|
||||||
|
msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`}
|
||||||
|
a, err := ptypes.MarshalAny(&msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("an unexpected error occurred when marshalling to Any: %v", err)
|
||||||
|
}
|
||||||
|
str, err := new(Marshaler).MarshalToString(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err)
|
||||||
|
}
|
||||||
|
// after custom marshaling, it's round-tripped through JSON decoding/encoding already,
|
||||||
|
// so the keys are sorted, whitespace is compacted, and "@type" key has been added
|
||||||
|
expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}`
|
||||||
|
if str != expected {
|
||||||
|
t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalingTests = []struct {
|
||||||
|
desc string
|
||||||
|
unmarshaler Unmarshaler
|
||||||
|
json string
|
||||||
|
pb proto.Message
|
||||||
|
}{
|
||||||
|
{"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject},
|
||||||
|
{"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject},
|
||||||
|
{"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject},
|
||||||
|
{"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject},
|
||||||
|
{"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject},
|
||||||
|
{"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject},
|
||||||
|
{"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
|
||||||
|
{"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
|
||||||
|
{"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)},
|
||||||
|
{"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
|
||||||
|
{"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
|
||||||
|
{"unknown enum value object",
|
||||||
|
Unmarshaler{},
|
||||||
|
"{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}",
|
||||||
|
&pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}},
|
||||||
|
{"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`,
|
||||||
|
&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
|
||||||
|
proto3pb.Message_PUNS,
|
||||||
|
proto3pb.Message_SLAPSTICK,
|
||||||
|
}}},
|
||||||
|
{"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`,
|
||||||
|
&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
|
||||||
|
proto3pb.Message_PUNS,
|
||||||
|
proto3pb.Message_SLAPSTICK,
|
||||||
|
}}},
|
||||||
|
{"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`,
|
||||||
|
&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
|
||||||
|
proto3pb.Message_PUNS,
|
||||||
|
proto3pb.Message_SLAPSTICK,
|
||||||
|
}}},
|
||||||
|
{"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}},
|
||||||
|
{"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}},
|
||||||
|
{"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}},
|
||||||
|
{"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}},
|
||||||
|
{"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}},
|
||||||
|
{"map<int64, int32>", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}},
|
||||||
|
{"map<string, string>", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}},
|
||||||
|
{"map<int32, Object>", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}},
|
||||||
|
{"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber},
|
||||||
|
{"Any with message", Unmarshaler{}, anySimpleJSON, anySimple},
|
||||||
|
{"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple},
|
||||||
|
{"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown},
|
||||||
|
{"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown},
|
||||||
|
// TODO: This is broken.
|
||||||
|
//{"map<string, enum>", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
|
||||||
|
{"map<string, enum as int>", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
|
||||||
|
{"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}},
|
||||||
|
{"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
|
||||||
|
{"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
|
||||||
|
{"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}},
|
||||||
|
{"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}},
|
||||||
|
{"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
|
||||||
|
{"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
|
||||||
|
|
||||||
|
{"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}},
|
||||||
|
{"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}},
|
||||||
|
{"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}},
|
||||||
|
{"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}},
|
||||||
|
{"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}},
|
||||||
|
{"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}},
|
||||||
|
{"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}},
|
||||||
|
{"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &stpb.Struct{}}},
|
||||||
|
{"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{
|
||||||
|
"a": {Kind: &stpb.Value_StringValue{"x"}},
|
||||||
|
"b": {Kind: &stpb.Value_NullValue{}},
|
||||||
|
"c": {Kind: &stpb.Value_NumberValue{3}},
|
||||||
|
"d": {Kind: &stpb.Value_BoolValue{true}},
|
||||||
|
}}}},
|
||||||
|
{"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{
|
||||||
|
"a": {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{
|
||||||
|
"b": {Kind: &stpb.Value_NumberValue{1}},
|
||||||
|
"c": {Kind: &stpb.Value_ListValue{&stpb.ListValue{Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{"d": {Kind: &stpb.Value_BoolValue{true}}}}}},
|
||||||
|
{Kind: &stpb.Value_StringValue{"f"}},
|
||||||
|
}}}},
|
||||||
|
}}}},
|
||||||
|
}}}},
|
||||||
|
{"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}},
|
||||||
|
{"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &stpb.ListValue{}}},
|
||||||
|
{"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_StringValue{"x"}},
|
||||||
|
{Kind: &stpb.Value_NullValue{}},
|
||||||
|
{Kind: &stpb.Value_NumberValue{3}},
|
||||||
|
{Kind: &stpb.Value_BoolValue{true}},
|
||||||
|
}}}},
|
||||||
|
{"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}},
|
||||||
|
{"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}},
|
||||||
|
{"bool Value", Unmarshaler{}, `{"val":true}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_BoolValue{true}}}},
|
||||||
|
{"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"x"}}}},
|
||||||
|
{"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}},
|
||||||
|
{"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &stpb.Value{
|
||||||
|
Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_StringValue{"x"}},
|
||||||
|
{Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{
|
||||||
|
{Kind: &stpb.Value_ListValue{&stpb.ListValue{
|
||||||
|
Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}},
|
||||||
|
}}},
|
||||||
|
{Kind: &stpb.Value_StringValue{"z"}},
|
||||||
|
},
|
||||||
|
}}},
|
||||||
|
},
|
||||||
|
}}}}},
|
||||||
|
|
||||||
|
{"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}},
|
||||||
|
{"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}},
|
||||||
|
{"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}},
|
||||||
|
{"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}},
|
||||||
|
{"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}},
|
||||||
|
{"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}},
|
||||||
|
{"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}},
|
||||||
|
{"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}},
|
||||||
|
{"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}},
|
||||||
|
|
||||||
|
// Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct.
|
||||||
|
{"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}},
|
||||||
|
{"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}},
|
||||||
|
{"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}},
|
||||||
|
{"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}},
|
||||||
|
{"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}},
|
||||||
|
{"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}},
|
||||||
|
{"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}},
|
||||||
|
{"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}},
|
||||||
|
{"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshaling(t *testing.T) {
|
||||||
|
for _, tt := range unmarshalingTests {
|
||||||
|
// Make a new instance of the type of our expected object.
|
||||||
|
p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
|
||||||
|
|
||||||
|
err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %v", tt.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// For easier diffs, compare text strings of the protos.
|
||||||
|
exp := proto.MarshalTextString(tt.pb)
|
||||||
|
act := proto.MarshalTextString(p)
|
||||||
|
if string(exp) != string(act) {
|
||||||
|
t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
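A minimal sketch of the two entry points this test drives, the package-level UnmarshalString helper and an Unmarshaler configured to tolerate unknown fields; it is an illustration, not part of the vendored tests:

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"

	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	// Plain unmarshaling; both camelCase and orig_name keys are accepted,
	// per the "orig_name input" / "camelName input" cases above.
	var s pb.Simple
	if err := jsonpb.UnmarshalString(`{"oBool":true}`, &s); err != nil {
		fmt.Println("unmarshal:", err)
	}

	// By default an unknown key is an error (see unmarshalingShouldError
	// below); AllowUnknownFields makes the unmarshaler skip it instead.
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	var s2 pb.Simple
	if err := u.Unmarshal(strings.NewReader(`{"unknown":"foo"}`), &s2); err != nil {
		fmt.Println("unmarshal:", err)
	}
}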
|
||||||
|
|
||||||
|
func TestUnmarshalNullArray(t *testing.T) {
|
||||||
|
var repeats pb.Repeats
|
||||||
|
if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(repeats, pb.Repeats{}) {
|
||||||
|
t.Errorf("got non-nil fields in [%#v]", repeats)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalNullObject(t *testing.T) {
|
||||||
|
var maps pb.Maps
|
||||||
|
if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(maps, pb.Maps{}) {
|
||||||
|
t.Errorf("got non-nil fields in [%#v]", maps)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalNext(t *testing.T) {
|
||||||
|
// We only need to check against a few, not all of them.
|
||||||
|
tests := unmarshalingTests[:5]
|
||||||
|
|
||||||
|
// Create a buffer with many concatenated JSON objects.
|
||||||
|
var b bytes.Buffer
|
||||||
|
for _, tt := range tests {
|
||||||
|
b.WriteString(tt.json)
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(&b)
|
||||||
|
for _, tt := range tests {
|
||||||
|
// Make a new instance of the type of our expected object.
|
||||||
|
p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
|
||||||
|
|
||||||
|
err := tt.unmarshaler.UnmarshalNext(dec, p)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %v", tt.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// For easier diffs, compare text strings of the protos.
|
||||||
|
exp := proto.MarshalTextString(tt.pb)
|
||||||
|
act := proto.MarshalTextString(p)
|
||||||
|
if string(exp) != string(act) {
|
||||||
|
t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &pb.Simple{}
|
||||||
|
err := new(Unmarshaler).UnmarshalNext(dec, p)
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Errorf("eof: got %v, expected io.EOF", err)
|
||||||
|
}
|
||||||
|
}
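A minimal sketch of the streaming path TestUnmarshalNext covers: feed a json.Decoder several concatenated objects and pull them out one message at a time until io.EOF. Again this is only an illustration under the same API the test uses:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"github.com/golang/protobuf/jsonpb"

	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"oBool":true}{"oInt32":-32}`))
	u := new(jsonpb.Unmarshaler)
	for {
		var msg pb.Simple
		err := u.UnmarshalNext(dec, &msg)
		if err == io.EOF {
			break // decoder exhausted, mirroring the check at the end of the test
		}
		if err != nil {
			fmt.Println("decode:", err)
			break
		}
		fmt.Printf("%+v\n", &msg)
	}
}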
|
||||||
|
|
||||||
|
var unmarshalingShouldError = []struct {
|
||||||
|
desc string
|
||||||
|
in string
|
||||||
|
pb proto.Message
|
||||||
|
}{
|
||||||
|
{"a value", "666", new(pb.Simple)},
|
||||||
|
{"gibberish", "{adskja123;l23=-=", new(pb.Simple)},
|
||||||
|
{"unknown field", `{"unknown": "foo"}`, new(pb.Simple)},
|
||||||
|
{"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalingBadInput(t *testing.T) {
|
||||||
|
for _, tt := range unmarshalingShouldError {
|
||||||
|
err := UnmarshalString(tt.in, tt.pb)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("an error was expected when parsing %q instead of an object", tt.desc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type funcResolver func(turl string) (proto.Message, error)

func (fn funcResolver) Resolve(turl string) (proto.Message, error) {
	return fn(turl)
}

func TestAnyWithCustomResolver(t *testing.T) {
	var resolvedTypeUrls []string
	resolver := funcResolver(func(turl string) (proto.Message, error) {
		resolvedTypeUrls = append(resolvedTypeUrls, turl)
		return new(pb.Simple), nil
	})
	msg := &pb.Simple{
		OBytes:  []byte{1, 2, 3, 4},
		OBool:   proto.Bool(true),
		OString: proto.String("foobar"),
		OInt64:  proto.Int64(1020304),
	}
	msgBytes, err := proto.Marshal(msg)
	if err != nil {
		t.Errorf("an unexpected error occurred when marshaling message: %v", err)
	}
	// make an Any with a type URL that won't resolve w/out custom resolver
	any := &anypb.Any{
		TypeUrl: "https://foobar.com/some.random.MessageKind",
		Value:   msgBytes,
	}

	m := Marshaler{AnyResolver: resolver}
	js, err := m.MarshalToString(any)
	if err != nil {
		t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err)
	}
	if len(resolvedTypeUrls) != 1 {
		t.Errorf("custom resolver was not invoked during marshaling")
	} else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" {
		t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], "https://foobar.com/some.random.MessageKind")
	}
	wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}`
	if js != wanted {
		t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted)
	}

	u := Unmarshaler{AnyResolver: resolver}
	roundTrip := &anypb.Any{}
	err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip)
	if err != nil {
		t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err)
	}
	if len(resolvedTypeUrls) != 2 {
		t.Errorf("custom resolver was not invoked during marshaling")
	} else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" {
		t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind")
	}
	if !proto.Equal(any, roundTrip) {
		t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any)
	}
}

func TestUnmarshalJSONPBUnmarshaler(t *testing.T) {
	rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }`
	var msg dynamicMessage
	if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil {
		t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err)
	}
	if msg.rawJson != rawJson {
		t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.rawJson, rawJson)
	}
}

func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) {
	rawJson := `{"stringField":null}`
	var ptrFieldMsg ptrFieldMessage
	if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil {
		t.Errorf("unmarshal error: %v", err)
	}

	want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}}
	if !proto.Equal(&ptrFieldMsg, &want) {
		t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want)
	}
}

func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) {
	rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }`
	var got anypb.Any
	if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil {
		t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err)
	}

	dm := &dynamicMessage{rawJson: `{"baz":[0,1,2,3],"foo":"bar"}`}
	var want anypb.Any
	if b, err := proto.Marshal(dm); err != nil {
		t.Errorf("an unexpected error occurred when marshaling message: %v", err)
	} else {
		want.TypeUrl = "blah.com/" + dynamicMessageName
		want.Value = b
	}

	if !proto.Equal(&got, &want) {
		t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want)
	}
}

const (
	dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage"
)

func init() {
	// we register the custom type below so that we can use it in Any types
	proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName)
}

type ptrFieldMessage struct {
	StringField *stringField `protobuf:"bytes,1,opt,name=stringField"`
}

func (m *ptrFieldMessage) Reset() {
}

func (m *ptrFieldMessage) String() string {
	return m.StringField.StringValue
}

func (m *ptrFieldMessage) ProtoMessage() {
}

type stringField struct {
	IsSet       bool   `protobuf:"varint,1,opt,name=isSet"`
	StringValue string `protobuf:"bytes,2,opt,name=stringValue"`
}

func (s *stringField) Reset() {
}

func (s *stringField) String() string {
	return s.StringValue
}

func (s *stringField) ProtoMessage() {
}

func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error {
	s.IsSet = true
	s.StringValue = string(js)
	return nil
}

// dynamicMessage implements protobuf.Message but is not a normal generated message type.
// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support.
type dynamicMessage struct {
	rawJson string `protobuf:"bytes,1,opt,name=rawJson"`
}

func (m *dynamicMessage) Reset() {
	m.rawJson = "{}"
}

func (m *dynamicMessage) String() string {
	return m.rawJson
}

func (m *dynamicMessage) ProtoMessage() {
}

func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) {
	return []byte(m.rawJson), nil
}

func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error {
	m.rawJson = string(js)
	return nil
}
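
The tests above exercise three extension points of the jsonpb package: the JSONPBMarshaler and JSONPBUnmarshaler interfaces, which let a message take over its own JSON encoding, and the AnyResolver option for resolving custom type URLs in Any values. A minimal sketch of how a caller outside the package might wire up the marshaler/unmarshaler hooks follows; it is not part of the vendored file, and the rawEvent type is purely illustrative, modeled on the dynamicMessage helper above.

// Sketch only: rawEvent is a hypothetical caller-defined type; the jsonpb
// identifiers used here are the ones exercised by the tests above.
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
)

// rawEvent stores its JSON verbatim instead of mapping it onto generated fields.
type rawEvent struct{ raw string }

// proto.Message plumbing.
func (e *rawEvent) Reset()         { e.raw = "{}" }
func (e *rawEvent) String() string { return e.raw }
func (*rawEvent) ProtoMessage()    {}

// MarshalJSONPB / UnmarshalJSONPB let jsonpb delegate JSON handling to the type itself.
func (e *rawEvent) MarshalJSONPB(*jsonpb.Marshaler) ([]byte, error) { return []byte(e.raw), nil }
func (e *rawEvent) UnmarshalJSONPB(_ *jsonpb.Unmarshaler, b []byte) error {
	e.raw = string(b)
	return nil
}

func main() {
	var _ proto.Message = (*rawEvent)(nil) // compile-time interface check

	e := &rawEvent{}
	if err := jsonpb.UnmarshalString(`{"foo":"bar"}`, e); err != nil {
		panic(err)
	}
	out, err := (&jsonpb.Marshaler{}).MarshalToString(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // the JSON comes back verbatim
}

As the TestUnmarshalJSONPBUnmarshaler case above suggests, jsonpb checks for these interfaces before doing any reflection over struct tags, so a type on this path does not need to look like generated protobuf code.
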
vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile (generated, vendored, normal file; 33 additions)
@@ -0,0 +1,33 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2015 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto
vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go (generated, vendored, normal file; 266 additions)
@@ -0,0 +1,266 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: more_test_objects.proto

/*
Package jsonpb is a generated protocol buffer package.

It is generated from these files:
	more_test_objects.proto
	test_objects.proto

It has these top-level messages:
	Simple3
	SimpleSlice3
	SimpleMap3
	SimpleNull3
	Mappy
	Simple
	NonFinites
	Repeats
	Widget
	Maps
	MsgWithOneof
	Real
	Complex
	KnownTypes
*/
package jsonpb

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Numeral int32

const (
	Numeral_UNKNOWN Numeral = 0
	Numeral_ARABIC  Numeral = 1
	Numeral_ROMAN   Numeral = 2
)

var Numeral_name = map[int32]string{
	0: "UNKNOWN",
	1: "ARABIC",
	2: "ROMAN",
}
var Numeral_value = map[string]int32{
	"UNKNOWN": 0,
	"ARABIC":  1,
	"ROMAN":   2,
}

func (x Numeral) String() string {
	return proto.EnumName(Numeral_name, int32(x))
}
func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

type Simple3 struct {
	Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"`
}

func (m *Simple3) Reset() { *m = Simple3{} }
func (m *Simple3) String() string { return proto.CompactTextString(m) }
func (*Simple3) ProtoMessage() {}
func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

func (m *Simple3) GetDub() float64 {
	if m != nil {
		return m.Dub
	}
	return 0
}

type SimpleSlice3 struct {
	Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"`
}

func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} }
func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) }
func (*SimpleSlice3) ProtoMessage() {}
func (*SimpleSlice3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

func (m *SimpleSlice3) GetSlices() []string {
	if m != nil {
		return m.Slices
	}
	return nil
}

type SimpleMap3 struct {
	Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *SimpleMap3) Reset() { *m = SimpleMap3{} }
func (m *SimpleMap3) String() string { return proto.CompactTextString(m) }
func (*SimpleMap3) ProtoMessage() {}
func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

func (m *SimpleMap3) GetStringy() map[string]string {
	if m != nil {
		return m.Stringy
	}
	return nil
}

type SimpleNull3 struct {
	Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"`
}

func (m *SimpleNull3) Reset() { *m = SimpleNull3{} }
func (m *SimpleNull3) String() string { return proto.CompactTextString(m) }
func (*SimpleNull3) ProtoMessage() {}
func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

func (m *SimpleNull3) GetSimple() *Simple3 {
	if m != nil {
		return m.Simple
	}
	return nil
}

type Mappy struct {
	Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"`
	S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
}

func (m *Mappy) Reset() { *m = Mappy{} }
func (m *Mappy) String() string { return proto.CompactTextString(m) }
func (*Mappy) ProtoMessage() {}
func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }

func (m *Mappy) GetNummy() map[int64]int32 {
	if m != nil {
		return m.Nummy
	}
	return nil
}

func (m *Mappy) GetStrry() map[string]string {
	if m != nil {
		return m.Strry
	}
	return nil
}

func (m *Mappy) GetObjjy() map[int32]*Simple3 {
	if m != nil {
		return m.Objjy
	}
	return nil
}

func (m *Mappy) GetBuggy() map[int64]string {
	if m != nil {
		return m.Buggy
	}
	return nil
}

func (m *Mappy) GetBooly() map[bool]bool {
	if m != nil {
		return m.Booly
	}
	return nil
}

func (m *Mappy) GetEnumy() map[string]Numeral {
	if m != nil {
		return m.Enumy
	}
	return nil
}

func (m *Mappy) GetS32Booly() map[int32]bool {
	if m != nil {
		return m.S32Booly
	}
	return nil
}

func (m *Mappy) GetS64Booly() map[int64]bool {
	if m != nil {
		return m.S64Booly
	}
	return nil
}

func (m *Mappy) GetU32Booly() map[uint32]bool {
	if m != nil {
		return m.U32Booly
	}
	return nil
}

func (m *Mappy) GetU64Booly() map[uint64]bool {
	if m != nil {
		return m.U64Booly
	}
	return nil
}

func init() {
	proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3")
	proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3")
	proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3")
	proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3")
	proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy")
	proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value)
}

func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
	// 526 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c,
	0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0,
	0xad, 0x0c, 0xe6, 0x8b, 0x78, 0x74, 0x5d, 0x77, 0x95, 0x8e, 0x5e, 0x94, 0x11, 0x07, 0x1c, 0xc2,
	0x2e, 0x4b, 0xdc, 0x99, 0x90, 0xcc, 0x5f, 0xd8, 0xd6, 0xc0, 0xd7, 0xfb, 0xbb, 0x07, 0xe3, 0x48,
	0x72, 0x2d, 0x07, 0x85, 0x6c, 0x77, 0x52, 0x7e, 0xcf, 0xe3, 0x73, 0x24, 0x1d, 0x02, 0x2f, 0xd3,
	0xbc, 0x8c, 0x1f, 0xea, 0xb8, 0xaa, 0x1f, 0xf2, 0x68, 0x17, 0x3f, 0xd6, 0x95, 0x57, 0x94, 0x79,
	0x9d, 0x13, 0x63, 0x57, 0xe5, 0x59, 0x11, 0xb9, 0xe7, 0x60, 0x2e, 0xb7, 0x69, 0x91, 0xc4, 0x3e,
	0x19, 0xc3, 0xf0, 0x3b, 0x8d, 0x1c, 0xed, 0x42, 0xbb, 0xd4, 0x42, 0x5c, 0xba, 0x6f, 0xe0, 0x94,
	0x87, 0xcb, 0x64, 0xfb, 0x18, 0xfb, 0xe4, 0x05, 0x18, 0x15, 0xae, 0x2a, 0x47, 0xbb, 0x18, 0x5e,
	0xda, 0xa1, 0xd8, 0xb9, 0xbf, 0x34, 0x00, 0x0e, 0xce, 0xd7, 0x85, 0x4f, 0x3e, 0x81, 0x59, 0xd5,
	0xe5, 0x36, 0xdb, 0x34, 0x8c, 0x1b, 0x4d, 0x5f, 0x79, 0xbc, 0x9a, 0xd7, 0x41, 0xde, 0x92, 0x13,
	0x77, 0x59, 0x5d, 0x36, 0x61, 0xcb, 0x4f, 0x6e, 0xe0, 0x54, 0x0e, 0xb0, 0xa7, 0x1f, 0x71, 0xc3,
	0x7a, 0xb2, 0x43, 0x5c, 0x92, 0xe7, 0xa0, 0xff, 0x5c, 0x27, 0x34, 0x76, 0x06, 0xec, 0x37, 0xbe,
	0xb9, 0x19, 0x5c, 0x6b, 0xee, 0x15, 0x8c, 0xf8, 0xf7, 0x03, 0x9a, 0x24, 0x3e, 0x79, 0x0b, 0x46,
	0xc5, 0xb6, 0xcc, 0x1e, 0x4d, 0x9f, 0xf5, 0x9b, 0xf0, 0x43, 0x11, 0xbb, 0xbf, 0x2d, 0xd0, 0xe7,
	0xeb, 0xa2, 0x68, 0x88, 0x07, 0x7a, 0x46, 0xd3, 0xb4, 0x6d, 0xdb, 0x69, 0x0d, 0x96, 0x7a, 0x01,
	0x46, 0xbc, 0x5f, 0x8e, 0x21, 0x5f, 0xd5, 0x65, 0xd9, 0x38, 0x03, 0x15, 0xbf, 0xc4, 0x48, 0xf0,
	0x0c, 0x43, 0x3e, 0x8f, 0x76, 0xbb, 0xc6, 0x19, 0xaa, 0xf8, 0x05, 0x46, 0x82, 0x67, 0x18, 0xf2,
	0x11, 0xdd, 0x6c, 0x1a, 0xe7, 0x44, 0xc5, 0xdf, 0x62, 0x24, 0x78, 0x86, 0x31, 0x3e, 0xcf, 0x93,
	0xc6, 0xd1, 0x95, 0x3c, 0x46, 0x2d, 0x8f, 0x6b, 0xe4, 0xe3, 0x8c, 0xa6, 0x8d, 0x63, 0xa8, 0xf8,
	0x3b, 0x8c, 0x04, 0xcf, 0x30, 0xf2, 0x11, 0xac, 0xca, 0x9f, 0xf2, 0x12, 0x26, 0x53, 0xce, 0xf7,
	0x8e, 0x2c, 0x52, 0x6e, 0x3d, 0xc1, 0x4c, 0xbc, 0xfa, 0xc0, 0x45, 0x4b, 0x29, 0x8a, 0xb4, 0x15,
	0xc5, 0x16, 0x45, 0xda, 0x56, 0xb4, 0x55, 0xe2, 0xaa, 0x5f, 0x91, 0x4a, 0x15, 0x69, 0x5b, 0x11,
	0x94, 0x62, 0xbf, 0x62, 0x0b, 0x4f, 0xae, 0x01, 0xba, 0x87, 0x96, 0xe7, 0x6f, 0xa8, 0x98, 0x3f,
	0x5d, 0x9a, 0x3f, 0x34, 0xbb, 0x27, 0xff, 0x97, 0xc9, 0x9d, 0xdc, 0x03, 0x74, 0x8f, 0x2f, 0x9b,
	0x3a, 0x37, 0x5f, 0xcb, 0xa6, 0x62, 0x92, 0xfb, 0x4d, 0x74, 0x73, 0x71, 0xac, 0x7d, 0x7b, 0xdf,
	0x7c, 0xba, 0x10, 0xd9, 0xb4, 0x14, 0xa6, 0xb5, 0xd7, 0x7e, 0x37, 0x2b, 0x8a, 0x83, 0xf7, 0xda,
	0xff, 0xbf, 0x6b, 0x3f, 0xa0, 0x69, 0x5c, 0xae, 0x13, 0xf9, 0x53, 0x9f, 0xe1, 0xac, 0x37, 0x43,
	0x8a, 0xcb, 0x38, 0xdc, 0x07, 0xca, 0xf2, 0xab, 0x1e, 0x3b, 0xfe, 0xbe, 0xbc, 0x3a, 0x54, 0xf9,
	0xec, 0x6f, 0xe4, 0x43, 0x95, 0x4f, 0x8e, 0xc8, 0xef, 0xde, 0x83, 0x29, 0x6e, 0x82, 0x8c, 0xc0,
	0x5c, 0x05, 0x5f, 0x83, 0xc5, 0xb7, 0x60, 0xfc, 0x1f, 0x01, 0x30, 0x66, 0xe1, 0xec, 0xf6, 0xfe,
	0xcb, 0x58, 0x23, 0x36, 0xe8, 0xe1, 0x62, 0x3e, 0x0b, 0xc6, 0x83, 0xc8, 0x60, 0x7f, 0xe0, 0xfe,
	0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x34, 0xaf, 0xdb, 0x05, 0x00, 0x00,
}
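
Every generated message in the file above follows the same pattern: a plain struct with protobuf tags, nil-receiver-safe Get* accessors, and name/value lookup tables for each enum. A minimal sketch of what that buys a caller; the import alias is arbitrary, and importing this test-proto package is for illustration only.

// Sketch only: uses the generated test types above purely to illustrate the
// accessor and enum-table pattern emitted by protoc-gen-go.
package main

import (
	"fmt"

	testpb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	var s *testpb.Simple3                       // a nil message
	fmt.Println(s.GetDub())                     // 0 — getters tolerate a nil receiver
	fmt.Println(testpb.Numeral_ROMAN)           // "ROMAN" — String() goes through Numeral_name
	fmt.Println(testpb.Numeral_value["ARABIC"]) // 1 — reverse lookup by name
}
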
vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto (generated, vendored, normal file; 69 additions)
@@ -0,0 +1,69 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2015 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package jsonpb;

message Simple3 {
  double dub = 1;
}

message SimpleSlice3 {
  repeated string slices = 1;
}

message SimpleMap3 {
  map<string,string> stringy = 1;
}

message SimpleNull3 {
  Simple3 simple = 1;
}

enum Numeral {
  UNKNOWN = 0;
  ARABIC = 1;
  ROMAN = 2;
}

message Mappy {
  map<int64, int32> nummy = 1;
  map<string, string> strry = 2;
  map<int32, Simple3> objjy = 3;
  map<int64, string> buggy = 4;
  map<bool, bool> booly = 5;
  map<string, Numeral> enumy = 6;
  map<int32, bool> s32booly = 7;
  map<int64, bool> s64booly = 8;
  map<uint32, bool> u32booly = 9;
  map<uint64, bool> u64booly = 10;
}
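
These map-heavy fixtures exist to exercise jsonpb's handling of proto3 map fields (string keys for integral key types, enum values by name, and so on). A rough round-trip sketch follows, with illustrative values and an arbitrary import alias; it is not part of the vendored diff.

// Sketch only: round-trips a Mappy value through jsonpb to show what the map
// fixtures above are exercising; the values and printed output are illustrative.
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	testpb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	in := &testpb.Mappy{
		Nummy: map[int64]int32{42: 7},                               // int64 keys become JSON strings
		Enumy: map[string]testpb.Numeral{"x": testpb.Numeral_ROMAN}, // enums marshal by name by default
	}

	js, err := (&jsonpb.Marshaler{}).MarshalToString(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(js) // e.g. {"nummy":{"42":7},"enumy":{"x":"ROMAN"}}

	out := &testpb.Mappy{}
	if err := jsonpb.UnmarshalString(js, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetNummy()[42]) // 7
}
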
Some files were not shown because too many files have changed in this diff.