Update GODEPS
This commit is contained in:
parent
6ce37d02d1
commit
1bedee5707
File diff suppressed because it is too large
@@ -34,6 +34,8 @@ import (
gce "google.golang.org/api/compute/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

"k8s.io/apimachinery/pkg/util/wait"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
provider_gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

@@ -362,17 +364,17 @@ func buildGenericLabels(ref GceRef, machineType string, nodeName string) (map[st
result := make(map[string]string)

// TODO: extract it somehow
result[metav1.LabelArch] = defaultArch
result[metav1.LabelOS] = defaultOS
result[kubeletapis.LabelArch] = defaultArch
result[kubeletapis.LabelOS] = defaultOS

result[metav1.LabelInstanceType] = machineType
result[kubeletapis.LabelInstanceType] = machineType
ix := strings.LastIndex(ref.Zone, "-")
if ix == -1 {
return nil, fmt.Errorf("unexpected zone: %s", ref.Zone)
}
result[metav1.LabelZoneRegion] = ref.Zone[:ix]
result[metav1.LabelZoneFailureDomain] = ref.Zone
result[metav1.LabelHostname] = nodeName
result[kubeletapis.LabelZoneRegion] = ref.Zone[:ix]
result[kubeletapis.LabelZoneFailureDomain] = ref.Zone
result[kubeletapis.LabelHostname] = nodeName
return result, nil
}
@@ -19,8 +19,8 @@ package gce
import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

"github.com/stretchr/testify/assert"
)

@@ -32,12 +32,12 @@ func TestBuildGenericLabels(t *testing.T) {
Zone: "us-central1-b"},
"n1-standard-8", "sillyname")
assert.Nil(t, err)
assert.Equal(t, "us-central1", labels[metav1.LabelZoneRegion])
assert.Equal(t, "us-central1-b", labels[metav1.LabelZoneFailureDomain])
assert.Equal(t, "sillyname", labels[metav1.LabelHostname])
assert.Equal(t, "n1-standard-8", labels[metav1.LabelInstanceType])
assert.Equal(t, defaultArch, labels[metav1.LabelArch])
assert.Equal(t, defaultOS, labels[metav1.LabelOS])
assert.Equal(t, "us-central1", labels[kubeletapis.LabelZoneRegion])
assert.Equal(t, "us-central1-b", labels[kubeletapis.LabelZoneFailureDomain])
assert.Equal(t, "sillyname", labels[kubeletapis.LabelHostname])
assert.Equal(t, "n1-standard-8", labels[kubeletapis.LabelInstanceType])
assert.Equal(t, defaultArch, labels[kubeletapis.LabelArch])
assert.Equal(t, defaultOS, labels[kubeletapis.LabelOS])
}

func TestExtractLabelsFromKubeEnv(t *testing.T) {
@@ -20,8 +20,8 @@ import (
"math"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// GcePriceModel implements PriceModel interface for GCE.

@@ -95,7 +95,7 @@ func (model *GcePriceModel) NodePrice(node *apiv1.Node, startTime time.Time, end
price := 0.0
basePriceFound := false
if node.Labels != nil {
if machineType, found := node.Labels[metav1.LabelInstanceType]; found {
if machineType, found := node.Labels[kubeletapis.LabelInstanceType]; found {
var priceMapToUse map[string]float64
if node.Labels[preemptibleLabel] == "true" {
priceMapToUse = preemptiblePrices
@@ -22,10 +22,8 @@ import (
"net/url"
"strconv"

"k8s.io/apimachinery/pkg/runtime/schema"
kube_rest "k8s.io/client-go/rest"
kube_client_cmd "k8s.io/client-go/tools/clientcmd"
kube_client_cmd_api "k8s.io/client-go/tools/clientcmd/api"
)

// This code was borrowed from Heapster to push the work forward and contains some functionality

@@ -33,38 +31,16 @@ import (
// TODO(mwielgus): revisit this once we have the basic structur ready.

const (
// APIVersion to be used.
APIVersion = "v1"

defaultUseServiceAccount = false
defaultServiceAccountFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
defaultInClusterConfig = true
)

func getConfigOverrides(uri *url.URL) (*kube_client_cmd.ConfigOverrides, error) {
kubeConfigOverride := kube_client_cmd.ConfigOverrides{
ClusterInfo: kube_client_cmd_api.Cluster{
APIVersion: APIVersion,
},
}
kubeConfigOverride := kube_client_cmd.ConfigOverrides{}
if len(uri.Scheme) != 0 && len(uri.Host) != 0 {
kubeConfigOverride.ClusterInfo.Server = fmt.Sprintf("%s://%s", uri.Scheme, uri.Host)
}

opts := uri.Query()

if len(opts["apiVersion"]) >= 1 {
kubeConfigOverride.ClusterInfo.APIVersion = opts["apiVersion"][0]
}

if len(opts["insecure"]) > 0 {
insecure, err := strconv.ParseBool(opts["insecure"][0])
if err != nil {
return nil, err
}
kubeConfigOverride.ClusterInfo.InsecureSkipTLSVerify = insecure
}

return &kubeConfigOverride, nil
}

@@ -98,11 +74,6 @@ func GetKubeClientConfig(uri *url.URL) (*kube_rest.Config, error) {
if configOverrides.ClusterInfo.Server != "" {
kubeConfig.Host = configOverrides.ClusterInfo.Server
}
kubeConfig.GroupVersion = &schema.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
kubeConfig.Insecure = configOverrides.ClusterInfo.InsecureSkipTLSVerify
if configOverrides.ClusterInfo.InsecureSkipTLSVerify {
kubeConfig.TLSClientConfig.CAFile = ""
}
} else {
authFile := ""
if len(opts["auth"]) > 0 {

@@ -117,10 +88,8 @@ func GetKubeClientConfig(uri *url.URL) (*kube_rest.Config, error) {
}
} else {
kubeConfig = &kube_rest.Config{
Host: configOverrides.ClusterInfo.Server,
Insecure: configOverrides.ClusterInfo.InsecureSkipTLSVerify,
Host: configOverrides.ClusterInfo.Server,
}
kubeConfig.GroupVersion = &schema.GroupVersion{Version: configOverrides.ClusterInfo.APIVersion}
}
}
if len(kubeConfig.Host) == 0 {
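A minimal, hedged sketch of how the override parsing shown above might be exercised from within the same package; the `insecure` query option comes from the code in this diff, while the URI value and the wrapper function are illustrative only.

```go
package kubeclient // hypothetical: same package as getConfigOverrides

import (
	"fmt"
	"net/url"
)

func exampleOverrides() error {
	// A Heapster-style source URI; "insecure" is one of the query options parsed above.
	uri, err := url.Parse("https://10.0.0.1:443/?insecure=true")
	if err != nil {
		return err
	}
	overrides, err := getConfigOverrides(uri)
	if err != nil {
		return err
	}
	fmt.Println(overrides.ClusterInfo.Server, overrides.ClusterInfo.InsecureSkipTLSVerify)
	return nil
}
```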
@@ -32,8 +32,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/api"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
podv1 "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"

"github.com/golang/glog"

@@ -56,7 +58,7 @@ func GetAllNodesAvailableTime(nodes []*apiv1.Node) time.Time {
// Each pod must be in condition "Scheduled: False; Reason: Unschedulable"
func SlicePodsByPodScheduledTime(pods []*apiv1.Pod, threshold time.Time) (oldPods []*apiv1.Pod, newPods []*apiv1.Pod) {
for _, pod := range pods {
_, condition := apiv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
_, condition := podv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
if condition != nil {
if condition.LastTransitionTime.After(threshold) {
newPods = append(newPods, pod)

@@ -79,7 +81,7 @@ func ResetPodScheduledCondition(kubeClient kube_client.Interface, pods []*apiv1.
}

func resetPodScheduledConditionForPod(kubeClient kube_client.Interface, pod *apiv1.Pod) error {
_, condition := apiv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
_, condition := podv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
if condition != nil {
glog.V(4).Infof("Reseting pod condition for %s/%s, last transition: %s",
pod.Namespace, pod.Name, condition.LastTransitionTime.Time.String())

@@ -264,7 +266,7 @@ func sanitizeTemplateNode(node *apiv1.Node, nodeGroup string) (*apiv1.Node, erro
newNode := obj.(*apiv1.Node)
newNode.Labels = make(map[string]string, len(node.Labels))
for k, v := range node.Labels {
if k != metav1.LabelHostname {
if k != kubeletapis.LabelHostname {
newNode.Labels[k] = v
} else {
newNode.Labels[k] = nodeName
@@ -26,8 +26,8 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/simulator"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

"github.com/stretchr/testify/assert"
)

@@ -108,15 +108,15 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
func TestSanitizeLabels(t *testing.T) {
oldNode := BuildTestNode("ng1-1", 1000, 1000)
oldNode.Labels = map[string]string{
metav1.LabelHostname: "abc",
"x": "y",
kubeletapis.LabelHostname: "abc",
"x": "y",
}
node, err := sanitizeTemplateNode(oldNode, "bzium")
assert.NoError(t, err)
assert.NotEqual(t, node.Labels[metav1.LabelHostname], "abc")
assert.NotEqual(t, node.Labels[kubeletapis.LabelHostname], "abc")
assert.Equal(t, node.Labels["x"], "y")
assert.NotEqual(t, node.Name, oldNode.Name)
assert.Equal(t, node.Labels[metav1.LabelHostname], node.Name)
assert.Equal(t, node.Labels[kubeletapis.LabelHostname], node.Name)
}

func TestRemoveFixNodeTargetSize(t *testing.T) {
@@ -262,7 +262,7 @@ func main() {
Namespace: *namespace,
Name: "cluster-autoscaler",
},
Client: kubeClient,
Client: kubeClient.Core(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: kube_util.CreateEventRecorder(kubeClient),
@@ -150,13 +150,14 @@ func TestFastGetPodsToMove(t *testing.T) {
},
Spec: apiv1.PodSpec{},
}
one := intstr.FromInt(1)
pdb8 := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: "ns",
},
Spec: policyv1.PodDisruptionBudgetSpec{
MinAvailable: intstr.FromInt(1),
MinAvailable: &one,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"critical": "true",

@@ -189,7 +190,7 @@ func TestFastGetPodsToMove(t *testing.T) {
Namespace: "ns",
},
Spec: policyv1.PodDisruptionBudgetSpec{
MinAvailable: intstr.FromInt(1),
MinAvailable: &one,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"critical": "true",
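The `MinAvailable` edits above track the policy/v1beta1 API change that turned the field into a pointer; a small sketch assembling the same spec the updated test builds (names and values mirror the diff, the package and helper function are illustrative).

```go
package drain_test // hypothetical package name for this sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	policyv1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
)

// newTestPDB shows why the diff introduces the local "one": MinAvailable is now
// *intstr.IntOrString, so the address of a value is stored instead of the value itself.
func newTestPDB() *policyv1.PodDisruptionBudget {
	one := intstr.FromInt(1)
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: "ns"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &one,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"critical": "true"}},
		},
	}
}
```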
@@ -50,10 +50,12 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
"cluster-autoscaler",
kubeClient,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
apiv1.DefaultHardPodAffinitySymmetricWeight,
)

@@ -65,7 +67,8 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
if err != nil {
return nil, err
}
schedulerConfigFactory.Run()
// TODO: Verify that run is not needed anymore.
// schedulerConfigFactory.Run()
return &PredicateChecker{
predicates: predicates,
}, nil
@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
podv1 "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
policyv1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

@@ -119,7 +120,7 @@ func (unschedulablePodLister *UnschedulablePodLister) List() ([]*apiv1.Pod, erro
return unschedulablePods, err
}
for _, pod := range allPods {
_, condition := apiv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
_, condition := podv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
if condition != nil && condition.Status == apiv1.ConditionFalse && condition.Reason == "Unschedulable" {
unschedulablePods = append(unschedulablePods, pod)
}
@@ -20,8 +20,8 @@ import (
"math"

"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

@@ -89,13 +89,13 @@ func IsNodeInfoSimilar(n1, n2 *schedulercache.NodeInfo) bool {
labels := make(map[string][]string)
for _, node := range nodes {
for label, value := range node.Node().ObjectMeta.Labels {
if label == metav1.LabelHostname {
if label == kubeletapis.LabelHostname {
continue
}
if label == metav1.LabelZoneFailureDomain {
if label == kubeletapis.LabelZoneFailureDomain {
continue
}
if label == metav1.LabelZoneRegion {
if label == kubeletapis.LabelZoneRegion {
continue
}
labels[label] = append(labels[label], value)
@@ -20,9 +20,9 @@ import (
"testing"

"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"

"github.com/stretchr/testify/assert"

@@ -111,12 +111,12 @@ func TestNodesSimilarVariousLabels(t *testing.T) {
checkNodesSimilar(t, n1, n2, true)

// Different hostname labels shouldn't matter
n1.ObjectMeta.Labels[metav1.LabelHostname] = "node1"
n2.ObjectMeta.Labels[metav1.LabelHostname] = "node2"
n1.ObjectMeta.Labels[kubeletapis.LabelHostname] = "node1"
n2.ObjectMeta.Labels[kubeletapis.LabelHostname] = "node2"
checkNodesSimilar(t, n1, n2, true)

// Different zone shouldn't matter either
n1.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = "mars-olympus-mons1-b"
n2.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = "us-houston1-a"
n1.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "mars-olympus-mons1-b"
n2.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "us-houston1-a"
checkNodesSimilar(t, n1, n2, true)
}
@@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
refv1 "k8s.io/kubernetes/pkg/api/v1/ref"
)

// BuildTestPod creates a pod with specified resources.

@@ -115,7 +116,7 @@ func SetNodeReadyState(node *apiv1.Node, ready bool, lastTransition time.Time) {

// RefJSON builds string reference to
func RefJSON(o runtime.Object) string {
ref, err := apiv1.GetReference(api.Scheme, o)
ref, err := refv1.GetReference(api.Scheme, o)
if err != nil {
panic(err)
}
@@ -1,13 +0,0 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
gofmt -w *.go

docs:
gomake clean
godoc ${TARG} > README.txt
cluster-autoscaler/vendor/github.com/emicklei/go-restful-swagger12/.travis.yml (generated, vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
language: go

go:
- 1.x
@@ -1,5 +1,8 @@
Change history of swagger
=
2017-01-30
- moved from go-restful/swagger to go-restful-swagger12

2015-10-16
- add type override mechanism for swagger models (MR 254, nathanejohnson)
- replace uses of wildcard in generated apidocs (issue 251)
cluster-autoscaler/vendor/github.com/emicklei/go-restful-swagger12/LICENSE (generated, vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
Copyright (c) 2017 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,3 +1,8 @@
# go-restful-swagger12

[](https://travis-ci.org/emicklei/go-restful-swagger12)
[](https://godoc.org/github.com/emicklei/go-restful-swagger12)

How to use Swagger UI with go-restful
=

@@ -74,3 +79,5 @@ Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.

© 2017, ernestmicklei.com. MIT License. Contributions welcome.
@@ -226,6 +226,9 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix
pathToRoutes := newOrderedRouteMap()
for _, other := range ws.Routes() {
if strings.HasPrefix(other.Path, pathPrefix) {
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
continue
}
pathToRoutes.Add(other.Path, other)
}
}

@@ -290,13 +293,12 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *
if each.Model != nil {
st := reflect.TypeOf(each.Model)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
if isCollection {
modelName = "array[" + modelName + "]"
// collection cannot be in responsemodel
if !isCollection {
modelName := modelBuilder{}.keyFrom(st)
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
message.ResponseModel = modelName
}
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
// reference the model
message.ResponseModel = modelName
}
messages = append(messages, message)
}

@@ -331,12 +333,13 @@ func detectCollectionType(st reflect.Type) (bool, reflect.Type) {

// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
mb := modelBuilder{Models: models, Config: &sws.config}
if isResponse {
type_, items := asDataType(sample, &sws.config)
operation.Type = type_
sampleType, items := asDataType(sample, &sws.config)
operation.Type = sampleType
operation.Items = items
}
modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
mb.addModelFrom(sample)
}

func asSwaggerParameter(param restful.ParameterData) Parameter {
cluster-autoscaler/vendor/github.com/emicklei/go-restful/.travis.yml (generated, vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
language: go

go:
- 1.x

script: go test -v
@@ -1,39 +1,71 @@
Change history of go-restful
=
2017-02-16
- solved issue #304, make operation names unique

2017-01-30

[IMPORTANT] For swagger users, change your import statement to:
swagger "github.com/emicklei/go-restful-swagger12"

- moved swagger 1.2 code to go-restful-swagger12
- created TAG 2.0.0

2017-01-27

- remove defer request body close
- expose Dispatch for testing filters and Routefunctions
- swagger response model cannot be array
- created TAG 1.0.0

2016-12-22

- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)

2016-11-26

- Default change! now use CurlyRouter (was RouterJSR311)
- Default change! no more caching of request content
- Default change! do not recover from panics

2016-09-22

- fix the DefaultRequestContentType feature

2016-02-14

- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
- add constructors for custom entity accessors for xml and json

2015-09-27

- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency

2015-09-25

- fixed problem with changing Header after WriteHeader (issue 235)

2015-09-14

- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.

2015-08-06

- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI

2015-03-20

- add configurable logging

2015-03-18

- if not specified, the Operation is derived from the Route function

2015-03-17

- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter

@@ -42,21 +74,26 @@ Change history of go-restful
- add Notes to Route

2014-11-27

- (api add) PrettyPrint per response. (as proposed in #167)

2014-11-12

- (api add) ApiVersion(.) for documentation in Swagger UI

2014-11-10

- (api change) struct fields tagged with "description" show up in Swagger UI

2014-10-31

- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code

2014-10-23

- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS

@@ -70,102 +107,117 @@ Change history of go-restful
- (api add) ParameterNamed for detailed documentation

2014-04-16

- (api add) expose constructor of Request for testing.

2014-06-27

- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons).

2014-07-03

- (api add) CORS can be configured with a list of allowed domains

2014-03-12

- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)

2014-02-26

- (api add) Request now provides information about the matched Route, see method SelectedRoutePath

2014-02-17

- (api change) renamed parameter constants (go-lint checks)

2014-01-10

- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier

2014-01-07

- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)

2013-11-13

- (api add) Response knows how many bytes are written to the response body.

2013-10-29

- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.

2013-10-04

- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables

2013-09-12

- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths

2013-08-05
- add OPTIONS support
- add CORS support

2013-08-27

- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead

2014-04-15

- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString

2013-08-08

- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has be extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response

Important API changes:

- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.

2013-07-06

- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.

2013-06-19

- (improve) DoNotRecover option, moved request body closer, improved ReadEntity

2013-06-03

- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go

2013-06-02

- (optimize) Cache the RegExp compilation of Paths.

2013-05-22

- (api add) Added support for request/response filter functions

2013-05-18

- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)

[2012-11-14 .. 2013-05-18>

- See https://github.com/emicklei/go-restful/commits

2012-11-14

- Initial commit
@@ -0,0 +1,7 @@
all: test

test:
go test -v .

ex:
cd examples && ls *.go | xargs go build -o /tmp/ignore
@@ -1,8 +1,13 @@
go-restful
==========

package for building REST-style Web Services using Google Go

[](https://travis-ci.org/emicklei/go-restful)
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
[](https://godoc.org/github.com/emicklei/go-restful)

- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)

REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:

- GET = Retrieve a representation of a resource

@@ -40,35 +45,30 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo

- Routes for request → function mapping with path parameter (e.g. {id}) support
- Configurable router:
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accesing parameters (path,query,header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request → response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package)
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration

### Resources

- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)

[](https://drone.io/github.com/emicklei/go-restful/latest)

(c) 2012 - 2015, http://ernestmicklei.com. MIT License

Type ```git shortlog -s``` for a full list of contributors.

© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
@@ -9,6 +9,7 @@ import (
"compress/zlib"
)

// CompressorProvider describes a component that can provider compressors for the std methods.
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().
@@ -32,7 +32,7 @@ type Container struct {
contentEncodingEnabled bool // default is false
}

// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},

@@ -74,7 +74,7 @@ func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}

// Router changes the default Router (currently RouterJSR311)
// Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}

@@ -188,6 +188,17 @@ func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message)
}

// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
if httpWriter == nil {
panic("httpWriter cannot be nil")
}
if httpRequest == nil {
panic("httpRequest cannot be nil")
}
c.dispatch(httpWriter, httpRequest)
}

// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter

@@ -208,12 +219,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
}
}()
}
// Install closing the request body (if any)
defer func() {
if nil != httpRequest.Body {
httpRequest.Body.Close()
}
}()

// Detect if compression is needed
// assume without compression, test for override
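The newly exported `Dispatch` above exists so that filters and route functions can be exercised without binding a listener (the go-restful changelog earlier in this diff calls this out). A rough sketch of that usage; the web service and paths are illustrative only.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/users")
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("user " + req.PathParameter("id")))
	}))

	c := restful.NewContainer()
	c.Add(ws)

	// Drive the container directly; no HTTP server is started.
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/users/42", nil)
	c.Dispatch(rec, req)
	fmt.Println(rec.Code, rec.Body.String())
}
```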
@@ -1,5 +1,5 @@
/*
Package restful, a lean package for creating REST-style WebServices without magic.
Package restful , a lean package for creating REST-style WebServices without magic.

WebServices and Routes
|
|||
go test -test.v ...restful && \
|
||||
go test -test.v ...swagger && \
|
||||
go vet ...restful && \
|
||||
go fmt ...swagger && \
|
||||
go install ...swagger && \
|
||||
go fmt ...restful && \
|
||||
go install ...restful
|
||||
cd examples
|
||||
ls *.go | xargs -I {} go build -o /tmp/ignore {}
|
||||
cd ..
|
||||
|
|
@@ -5,7 +5,7 @@ import (
"os"
)

// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})

@@ -18,14 +18,17 @@ func init() {
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}

// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}

// Print delegates to the Logger
func Print(v ...interface{}) {
Logger.Print(v...)
}

// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}
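The log package above exposes a pluggable `StdLogger`; a small sketch of swapping in a custom stdlib logger via the `SetLogger` and `Printf` helpers shown in the diff (the prefix string is illustrative).

```go
package main

import (
	stdlog "log"
	"os"

	restfullog "github.com/emicklei/go-restful/log"
)

func main() {
	// Replace the default "[restful]" logger installed by the package's init().
	restfullog.SetLogger(stdlog.New(os.Stdout, "[my-app] ", stdlog.LstdFlags))
	restfullog.Printf("logging via the injected %s", "StdLogger")
}
```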
@@ -5,20 +5,15 @@ package restful
// that can be found in the LICENSE file.

import (
"bytes"
"compress/zlib"
"io/ioutil"
"net/http"
)

var defaultRequestContentType string

var doCacheReadEntityBytes = false

// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
Request *http.Request
bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees

@@ -41,12 +36,6 @@ func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}

// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it.
func SetCacheReadEntity(doCache bool) {
doCacheReadEntityBytes = doCache
}

// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]

@@ -81,18 +70,6 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)

// OLD feature, cache the body for reads
if doCacheReadEntityBytes {
if r.bodyContent == nil {
data, err := ioutil.ReadAll(r.Request.Body)
if err != nil {
return err
}
r.bodyContent = &data
}
r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
}

// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()
@@ -9,7 +9,7 @@ import (
"net/http"
)

// DEPRECATED, use DefaultResponseContentType(mime)
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string

//PrettyPrintResponses controls the indentation feature of XML and JSON serialization

@@ -27,11 +27,12 @@ type Response struct {
err error // err property is kept when WriteError is called
}

// Creates a new response based on a http ResponseWriter.
// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
}

// DefaultResponseContentType set a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
@@ -34,6 +34,9 @@ type Route struct {
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload

// Extra information used to store custom information about the route.
Metadata map[string]interface{}
}

// Initialize for Route
@@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file.

import (
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync/atomic"

"github.com/emicklei/go-restful/log"
)

@@ -22,6 +24,9 @@ type RouteBuilder struct {
httpMethod string // required
function RouteFunction // required
filters []FilterFunction

typeNameHandleFunc TypeNameHandleFunction // required

// documentation
doc string
notes string

@@ -29,6 +34,7 @@ type RouteBuilder struct {
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
metadata map[string]interface{}
}

// Do evaluates each argument with the RouteBuilder itself.

@@ -92,8 +98,13 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
fn := b.typeNameHandleFunc
if fn == nil {
fn = reflectTypeName
}
typeAsName := fn(sample)

b.readSample = sample
typeAsName := reflect.TypeOf(sample).String()
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody()
bodyParameter.Required(true)

@@ -145,9 +156,10 @@ func (b *RouteBuilder) ReturnsError(code int, message string, model interface{})
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
err := ResponseError{
Code: code,
Message: message,
Model: model,
Code: code,
Message: message,
Model: model,
IsDefault: false,
}
// lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil {

@@ -157,10 +169,36 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
return b
}

// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
b.Returns(0, message, model)
// Modify the ResponseError just added/updated
re := b.errorMap[0]
// errorMap is initialized
b.errorMap[0] = ResponseError{
Code: re.Code,
Message: re.Message,
Model: re.Model,
IsDefault: true,
}
return b
}

// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
if b.metadata == nil {
b.metadata = map[string]interface{}{}
}
b.metadata[key] = value
return b
}

// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
Code int
Message string
Model interface{}
Code int
Message string
Model interface{}
IsDefault bool
}

func (b *RouteBuilder) servicePath(path string) *RouteBuilder {

@@ -186,6 +224,13 @@ func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
}
}

// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
b.typeNameHandleFunc = handler
return b
}

// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath)

@@ -217,7 +262,8 @@ func (b *RouteBuilder) Build() Route {
ParameterDocs: b.parameters,
ResponseErrors: b.errorMap,
ReadSample: b.readSample,
WriteSample: b.writeSample}
WriteSample: b.writeSample,
Metadata: b.metadata}
route.postBuild()
return route
}

@@ -226,6 +272,8 @@ func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}

var anonymousFuncCount int32

// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {

@@ -236,5 +284,10 @@ func nameOfFunction(f interface{}) string {
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
if last == "func1" { // this could mean conflicts in API docs
val := atomic.AddInt32(&anonymousFuncCount, 1)
last = "func" + fmt.Sprintf("%d", val)
atomic.StoreInt32(&anonymousFuncCount, val)
}
return last
}
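A short sketch of the two RouteBuilder additions shown above, `Metadata` and `DefaultReturns`; the route path and values are illustrative only.

```go
package main

import (
	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/healthz").
		To(func(req *restful.Request, resp *restful.Response) { resp.WriteHeader(204) }).
		// Attach arbitrary key/value documentation to the route via the new Metadata map.
		Metadata("auth", "none").
		// Register a catch-all response description; it is stored under code 0 with IsDefault=true.
		DefaultReturns("unexpected error", nil))
	restful.Add(ws)
}
```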
@@ -3,6 +3,7 @@ package restful
import (
"errors"
"os"
"reflect"
"sync"

"github.com/emicklei/go-restful/log"

@@ -24,6 +25,8 @@ type WebService struct {
documentation string
apiVersion string

typeNameHandleFunc TypeNameHandleFunction

dynamicRoutes bool

// protects 'routes' if dynamic routes are enabled

@@ -34,6 +37,25 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
w.dynamicRoutes = enable
}

// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
// into the restful documentation for the service.
type TypeNameHandleFunction func(sample interface{}) string

// TypeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions. If not set, the web service will invoke
// reflect.TypeOf(object).String().
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
w.typeNameHandleFunc = handler
return w
}

// reflectTypeName is the default TypeNameHandleFunction and for a given object
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
// the reflection API.
func reflectTypeName(sample interface{}) string {
return reflect.TypeOf(sample).String()
}

// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() {
compiled, err := newPathExpression(w.rootPath)

@@ -174,7 +196,7 @@ func (w *WebService) RemoveRoute(path, method string) error {

// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
}

// Produces specifies that this WebService can produce one or more MIME types.

@@ -239,30 +261,30 @@ func (w *WebService) Documentation() string {

// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}

// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
}

// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
}

// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
}

// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}

// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}
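A sketch of wiring up the new `TypeNameHandler` hook added above; the payload type, handler function, and naming scheme are placeholders, not part of the diff.

```go
package main

import (
	"reflect"

	restful "github.com/emicklei/go-restful"
)

// Thing is a placeholder payload type.
type Thing struct{ Name string }

func createThing(req *restful.Request, resp *restful.Response) {}

func main() {
	ws := new(restful.WebService)

	// Override how sample types are named in generated documentation;
	// without this, the service falls back to reflect.TypeOf(sample).String().
	ws.TypeNameHandler(func(sample interface{}) string {
		return "custom." + reflect.TypeOf(sample).Name()
	})

	// Builders created through ws.POST/GET/... now carry the handler,
	// so Reads(...) uses it when deriving the body parameter's type name.
	ws.Route(ws.POST("/things").To(createThing).Reads(Thing{}))
	restful.Add(ws)
}
```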
cluster-autoscaler/vendor/github.com/exponent-io/jsonpath/.gitignore (generated, vendored, new file, 24 lines)

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

cluster-autoscaler/vendor/github.com/exponent-io/jsonpath/.travis.yml (generated, vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
language: go

go:
- 1.5
- tip
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Exponent Labs LLC

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
cluster-autoscaler/vendor/github.com/exponent-io/jsonpath/README.md (generated, vendored, new file, 66 lines)

@@ -0,0 +1,66 @@
[](https://godoc.org/github.com/exponent-io/jsonpath)
[](https://travis-ci.org/exponent-io/jsonpath)

# jsonpath

This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used.

This Decoder has the following enhancements...
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.

## Installation

    go get -u github.com/exponent-io/jsonpath

## Example Usage

#### SeekTo

```go
import "github.com/exponent-io/jsonpath"

var j = []byte(`[
    {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
    {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
]`)

w := json.NewDecoder(bytes.NewReader(j))
var v interface{}

w.SeekTo(1, "Point", "G")
w.Decode(&v) // v is 218
```

#### Scan with PathActions

```go
var j = []byte(`{"colors":[
    {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
    {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
]}`)

var actions PathActions

// Extract the value at Point.A
actions.Add(func(d *Decoder) error {
    var alpha int
    err := d.Decode(&alpha)
    fmt.Printf("Alpha: %v\n", alpha)
    return err
}, "Point", "A")

w := NewDecoder(bytes.NewReader(j))
w.SeekTo("colors", 0)

var ok = true
var err error
for ok {
    ok, err = w.Scan(&actions)
    if err != nil && err != io.EOF {
        panic(err)
    }
}
```

210 cluster-autoscaler/vendor/github.com/exponent-io/jsonpath/decoder.go generated vendored Normal file
@ -0,0 +1,210 @@
package jsonpath

import (
	"encoding/json"
	"io"
)

// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
type KeyString string

// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
type Decoder struct {
	json.Decoder

	path    JsonPath
	context jsonContext
}

// NewDecoder creates a new instance of the extended JSON Decoder.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{Decoder: *json.NewDecoder(r)}
}

// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
//
// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
// each integer specifies an index into a JSON array.
//
// Consider the JSON structure
//
//  { "a": [0,"s",12e4,{"b":0,"v":35} ] }
//
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
//
// SeekTo returns a boolean value indicating whether a match was found.
//
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {

	if len(path) == 0 {
		return len(d.path) == 0, nil
	}
	last := len(path) - 1
	if i, ok := path[last].(int); ok {
		path[last] = i - 1
	}

	for {
		if d.path.Equal(path) {
			return true, nil
		}
		_, err := d.Token()
		if err == io.EOF {
			return false, nil
		} else if err != nil {
			return false, err
		}
	}
}

// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
// equivalent to encoding/json.Decode().
func (d *Decoder) Decode(v interface{}) error {
	switch d.context {
	case objValue:
		d.context = objKey
		break
	case arrValue:
		d.path.incTop()
		break
	}
	return d.Decoder.Decode(v)
}

// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
// position of the most-recently parsed token.
func (d *Decoder) Path() JsonPath {
	p := make(JsonPath, len(d.path))
	copy(p, d.path)
	return p
}

// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
// KeyString rather than as a native string.
func (d *Decoder) Token() (json.Token, error) {
	t, err := d.Decoder.Token()
	if err != nil {
		return t, err
	}

	if t == nil {
		switch d.context {
		case objValue:
			d.context = objKey
			break
		case arrValue:
			d.path.incTop()
			break
		}
		return t, err
	}

	switch t := t.(type) {
	case json.Delim:
		switch t {
		case json.Delim('{'):
			if d.context == arrValue {
				d.path.incTop()
			}
			d.path.push("")
			d.context = objKey
			break
		case json.Delim('}'):
			d.path.pop()
			d.context = d.path.inferContext()
			break
		case json.Delim('['):
			if d.context == arrValue {
				d.path.incTop()
			}
			d.path.push(-1)
			d.context = arrValue
			break
		case json.Delim(']'):
			d.path.pop()
			d.context = d.path.inferContext()
			break
		}
	case float64, json.Number, bool:
		switch d.context {
		case objValue:
			d.context = objKey
			break
		case arrValue:
			d.path.incTop()
			break
		}
		break
	case string:
		switch d.context {
		case objKey:
			d.path.nameTop(t)
			d.context = objValue
			return KeyString(t), err
		case objValue:
			d.context = objKey
		case arrValue:
			d.path.incTop()
		}
		break
	}

	return t, err
}

// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
// invoking each matching PathAction along the way.
//
// Scan returns true if there are more contiguous values to scan (for example in an array).
func (d *Decoder) Scan(ext *PathActions) (bool, error) {

	rootPath := d.Path()

	// If this is an array path, increment the root path in our local copy.
	if rootPath.inferContext() == arrValue {
		rootPath.incTop()
	}

	for {
		// advance the token position
		_, err := d.Token()
		if err != nil {
			return false, err
		}

	match:
		var relPath JsonPath

		// capture the new JSON path
		path := d.Path()

		if len(path) > len(rootPath) {
			// capture the path relative to where the scan started
			relPath = path[len(rootPath):]
		} else {
			// if the path is not longer than the root, then we are done with this scan
			// return boolean flag indicating if there are more items to scan at the same level
			return d.Decoder.More(), nil
		}

		// match the relative path against the path actions
		if node := ext.node.match(relPath); node != nil {
			if node.action != nil {
				// we have a match so execute the action
				err = node.action(d)
				if err != nil {
					return d.Decoder.More(), err
				}
				// The action may have advanced the decoder. If we are in an array, advancing it further would
				// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
				if d.path.inferContext() == arrValue && d.Decoder.More() {
					goto match
				}
			}
		}
	}
}

@ -0,0 +1,67 @@
// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
package jsonpath

import "fmt"

type jsonContext int

const (
	none jsonContext = iota
	objKey
	objValue
	arrValue
)

// AnyIndex can be used in a pattern to match any array index.
const AnyIndex = -2

// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
// each integer specifies an index into a JSON array.
type JsonPath []interface{}

func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
func (p *JsonPath) pop()               { *p = (*p)[:len(*p)-1] }

// increment the index at the top of the stack (must be an array index)
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }

// name the key at the top of the stack (must be an object key)
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }

// infer the context from the item at the top of the stack
func (p *JsonPath) inferContext() jsonContext {
	if len(*p) == 0 {
		return none
	}
	t := (*p)[len(*p)-1]
	switch t.(type) {
	case string:
		return objKey
	case int:
		return arrValue
	default:
		panic(fmt.Sprintf("Invalid stack type %T", t))
	}
}

// Equal tests for equality between two JsonPath types.
func (p *JsonPath) Equal(o JsonPath) bool {
	if len(*p) != len(o) {
		return false
	}
	for i, v := range *p {
		if v != o[i] {
			return false
		}
	}
	return true
}

func (p *JsonPath) HasPrefix(o JsonPath) bool {
	for i, v := range o {
		if v != (*p)[i] {
			return false
		}
	}
	return true
}

61 cluster-autoscaler/vendor/github.com/exponent-io/jsonpath/pathaction.go generated vendored Normal file
@ -0,0 +1,61 @@
package jsonpath

// pathNode is used to construct a trie of paths to be matched
type pathNode struct {
	matchOn    interface{} // string, or integer
	childNodes []pathNode
	action     DecodeAction
}

// match climbs the trie to find a node that matches the given JSON path.
func (n *pathNode) match(path JsonPath) *pathNode {
	var node *pathNode = n
	for _, ps := range path {
		found := false
		for i, n := range node.childNodes {
			if n.matchOn == ps {
				node = &node.childNodes[i]
				found = true
				break
			} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
				node = &node.childNodes[i]
				found = true
				break
			}
		}
		if !found {
			return nil
		}
	}
	return node
}

// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
type PathActions struct {
	node pathNode
}

// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
type DecodeAction func(d *Decoder) error

// Add specifies an action to call on the Decoder when the specified path is encountered.
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {

	var node *pathNode = &je.node
	for _, ps := range path {
		found := false
		for i, n := range node.childNodes {
			if n.matchOn == ps {
				node = &node.childNodes[i]
				found = true
				break
			}
		}
		if !found {
			node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
			node = &node.childNodes[len(node.childNodes)-1]
		}
	}
	node.action = action
}
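
The README snippets earlier in this diff are fragments; the following is a minimal, self-contained sketch of how the vendored decoder, path, and path-action types fit together. The sample JSON, the "Point"/"G" path, and the main-package wrapper are illustrative assumptions, not part of the vendored code.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/exponent-io/jsonpath"
)

func main() {
	// Illustrative input; any JSON stream works.
	data := []byte(`{"colors":[
		{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
		{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
	]}`)

	// Register an action that fires whenever a scanned element contains Point.G.
	var actions jsonpath.PathActions
	actions.Add(func(d *jsonpath.Decoder) error {
		var g int
		if err := d.Decode(&g); err != nil {
			return err
		}
		fmt.Println("G =", g)
		return nil
	}, "Point", "G")

	d := jsonpath.NewDecoder(bytes.NewReader(data))

	// Seek to the first element of the "colors" array, then scan each element.
	if _, err := d.SeekTo("colors", 0); err != nil {
		panic(err)
	}
	ok := true
	var err error
	for ok {
		ok, err = d.Scan(&actions)
		if err != nil && err != io.EOF {
			panic(err)
		}
	}
}
```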

36 cluster-autoscaler/vendor/github.com/go-openapi/analysis/.drone.yml generated vendored Normal file
@ -0,0 +1,36 @@
clone:
  path: github.com/go-openapi/analysis

matrix:
  GO_VERSION:
    - "1.6"

build:
  integration:
    image: golang:$$GO_VERSION
    pull: true
    commands:
      - go get -u github.com/stretchr/testify/assert
      - go get -u gopkg.in/yaml.v2
      - go get -u github.com/go-openapi/swag
      - go get -u github.com/go-openapi/jsonpointer
      - go get -u github.com/go-openapi/spec
      - go get -u github.com/go-openapi/loads/fmts
      - go test -race ./...
      - go test -v -cover -coverprofile=coverage.out -covermode=count ./...

notify:
  slack:
    channel: bots
    webhook_url: $$SLACK_URL
    username: drone

publish:
  coverage:
    server: https://coverage.vmware.run
    token: $$GITHUB_TOKEN
    # threshold: 70
    # must_increase: true
    when:
      matrix:
        GO_VERSION: "1.6"

2 cluster-autoscaler/vendor/github.com/go-openapi/analysis/.gitignore generated vendored Normal file
@ -0,0 +1,2 @@
secrets.yml
coverage.out

12 cluster-autoscaler/vendor/github.com/go-openapi/analysis/.pullapprove.yml generated vendored Normal file
@ -0,0 +1,12 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
  members:
    - casualjim
    - frapposelli
    - vburenin
    - pytlesk4
  name: pullapprove
  required: 1

74 cluster-autoscaler/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md generated vendored Normal file
@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

@ -0,0 +1,6 @@
# OpenAPI initiative analysis [](https://ci.vmware.run/go-openapi/analysis) [](https://coverage.vmware.run/go-openapi/analysis) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [](http://godoc.org/github.com/go-openapi/analysis)

A foundational library to analyze an OAI specification document for easier reasoning about the content.

614 cluster-autoscaler/vendor/github.com/go-openapi/analysis/analyzer.go generated vendored Normal file
@ -0,0 +1,614 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package analysis

import (
	"fmt"
	slashpath "path"
	"strconv"
	"strings"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/spec"
	"github.com/go-openapi/swag"
)

type referenceAnalysis struct {
	schemas    map[string]spec.Ref
	responses  map[string]spec.Ref
	parameters map[string]spec.Ref
	items      map[string]spec.Ref
	allRefs    map[string]spec.Ref
	referenced struct {
		schemas    map[string]SchemaRef
		responses  map[string]*spec.Response
		parameters map[string]*spec.Parameter
	}
}

func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
	r.allRefs["#"+key] = ref
}

func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
	r.items["#"+key] = items.Ref
	r.addRef(key, items.Ref)
}

func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
	r.schemas["#"+key] = ref.Schema.Ref
	r.addRef(key, ref.Schema.Ref)
}

func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
	r.responses["#"+key] = resp.Ref
	r.addRef(key, resp.Ref)
}

func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
	r.parameters["#"+key] = param.Ref
	r.addRef(key, param.Ref)
}

// New takes a swagger spec object and returns an analyzed spec document.
// The analyzed document contains a number of indices that make it easier to
// reason about semantics of a swagger specification for use in code generation
// or validation etc.
func New(doc *spec.Swagger) *Spec {
	a := &Spec{
		spec:        doc,
		consumes:    make(map[string]struct{}, 150),
		produces:    make(map[string]struct{}, 150),
		authSchemes: make(map[string]struct{}, 150),
		operations:  make(map[string]map[string]*spec.Operation, 150),
		allSchemas:  make(map[string]SchemaRef, 150),
		allOfs:      make(map[string]SchemaRef, 150),
		references: referenceAnalysis{
			schemas:    make(map[string]spec.Ref, 150),
			responses:  make(map[string]spec.Ref, 150),
			parameters: make(map[string]spec.Ref, 150),
			items:      make(map[string]spec.Ref, 150),
			allRefs:    make(map[string]spec.Ref, 150),
		},
	}
	a.references.referenced.schemas = make(map[string]SchemaRef, 150)
	a.references.referenced.responses = make(map[string]*spec.Response, 150)
	a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
	a.initialize()
	return a
}

// Spec takes a swagger spec object and turns it into a registry
// with a bunch of utility methods to act on the information in the spec
type Spec struct {
	spec        *spec.Swagger
	consumes    map[string]struct{}
	produces    map[string]struct{}
	authSchemes map[string]struct{}
	operations  map[string]map[string]*spec.Operation
	references  referenceAnalysis
	allSchemas  map[string]SchemaRef
	allOfs      map[string]SchemaRef
}

func (s *Spec) initialize() {
	for _, c := range s.spec.Consumes {
		s.consumes[c] = struct{}{}
	}
	for _, c := range s.spec.Produces {
		s.produces[c] = struct{}{}
	}
	for _, ss := range s.spec.Security {
		for k := range ss {
			s.authSchemes[k] = struct{}{}
		}
	}
	for path, pathItem := range s.AllPaths() {
		s.analyzeOperations(path, &pathItem)
	}

	for name, parameter := range s.spec.Parameters {
		refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
		if parameter.Items != nil {
			s.analyzeItems("items", parameter.Items, refPref)
		}
		if parameter.In == "body" && parameter.Schema != nil {
			s.analyzeSchema("schema", *parameter.Schema, refPref)
		}
	}

	for name, response := range s.spec.Responses {
		refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
		for _, v := range response.Headers {
			if v.Items != nil {
				s.analyzeItems("items", v.Items, refPref)
			}
		}
		if response.Schema != nil {
			s.analyzeSchema("schema", *response.Schema, refPref)
		}
	}

	for name, schema := range s.spec.Definitions {
		s.analyzeSchema(name, schema, "/definitions")
	}
	// TODO: after analyzing all things and flattening schemas etc
	// resolve all the collected references to their final representations
	// best put in a separate method because this could get expensive
}

func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
	// TODO: resolve refs here?
	op := pi
	s.analyzeOperation("GET", path, op.Get)
	s.analyzeOperation("PUT", path, op.Put)
	s.analyzeOperation("POST", path, op.Post)
	s.analyzeOperation("PATCH", path, op.Patch)
	s.analyzeOperation("DELETE", path, op.Delete)
	s.analyzeOperation("HEAD", path, op.Head)
	s.analyzeOperation("OPTIONS", path, op.Options)
	for i, param := range op.Parameters {
		refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
		if param.Ref.String() != "" {
			s.references.addParamRef(refPref, &param)
		}
		if param.Items != nil {
			s.analyzeItems("items", param.Items, refPref)
		}
		if param.Schema != nil {
			s.analyzeSchema("schema", *param.Schema, refPref)
		}
	}
}

func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
	if items == nil {
		return
	}
	refPref := slashpath.Join(prefix, name)
	s.analyzeItems(name, items.Items, refPref)
	if items.Ref.String() != "" {
		s.references.addItemsRef(refPref, items)
	}
}

func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
	if op == nil {
		return
	}

	for _, c := range op.Consumes {
		s.consumes[c] = struct{}{}
	}
	for _, c := range op.Produces {
		s.produces[c] = struct{}{}
	}
	for _, ss := range op.Security {
		for k := range ss {
			s.authSchemes[k] = struct{}{}
		}
	}
	if _, ok := s.operations[method]; !ok {
		s.operations[method] = make(map[string]*spec.Operation)
	}
	s.operations[method][path] = op
	prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
	for i, param := range op.Parameters {
		refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
		if param.Ref.String() != "" {
			s.references.addParamRef(refPref, &param)
		}
		s.analyzeItems("items", param.Items, refPref)
		if param.In == "body" && param.Schema != nil {
			s.analyzeSchema("schema", *param.Schema, refPref)
		}
	}
	if op.Responses != nil {
		if op.Responses.Default != nil {
			refPref := slashpath.Join(prefix, "responses", "default")
			if op.Responses.Default.Ref.String() != "" {
				s.references.addResponseRef(refPref, op.Responses.Default)
			}
			for _, v := range op.Responses.Default.Headers {
				s.analyzeItems("items", v.Items, refPref)
			}
			if op.Responses.Default.Schema != nil {
				s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
			}
		}
		for k, res := range op.Responses.StatusCodeResponses {
			refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
			if res.Ref.String() != "" {
				s.references.addResponseRef(refPref, &res)
			}
			for _, v := range res.Headers {
				s.analyzeItems("items", v.Items, refPref)
			}
			if res.Schema != nil {
				s.analyzeSchema("schema", *res.Schema, refPref)
			}
		}
	}
}

func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
	refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
	schRef := SchemaRef{
		Name:   name,
		Schema: &schema,
		Ref:    spec.MustCreateRef("#" + refURI),
	}
	s.allSchemas["#"+refURI] = schRef
	if schema.Ref.String() != "" {
		s.references.addSchemaRef(refURI, schRef)
	}
	for k, v := range schema.Definitions {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
	}
	for k, v := range schema.Properties {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
	}
	for k, v := range schema.PatternProperties {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
	}
	for i, v := range schema.AllOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
	}
	if len(schema.AllOf) > 0 {
		s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
	}
	for i, v := range schema.AnyOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
	}
	for i, v := range schema.OneOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
	}
	if schema.Not != nil {
		s.analyzeSchema("not", *schema.Not, refURI)
	}
	if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
		s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
	}
	if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
		s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
	}
	if schema.Items != nil {
		if schema.Items.Schema != nil {
			s.analyzeSchema("items", *schema.Items.Schema, refURI)
		}
		for i, sch := range schema.Items.Schemas {
			s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
		}
	}
}

// SecurityRequirement is a representation of a security requirement for an operation
type SecurityRequirement struct {
	Name   string
	Scopes []string
}

// SecurityRequirementsFor gets the security requirements for the operation
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
	if s.spec.Security == nil && operation.Security == nil {
		return nil
	}

	schemes := s.spec.Security
	if operation.Security != nil {
		schemes = operation.Security
	}

	unique := make(map[string]SecurityRequirement)
	for _, scheme := range schemes {
		for k, v := range scheme {
			if _, ok := unique[k]; !ok {
				unique[k] = SecurityRequirement{Name: k, Scopes: v}
			}
		}
	}

	var result []SecurityRequirement
	for _, v := range unique {
		result = append(result, v)
	}
	return result
}

// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
	requirements := s.SecurityRequirementsFor(operation)
	if len(requirements) == 0 {
		return nil
	}
	result := make(map[string]spec.SecurityScheme)
	for _, v := range requirements {
		if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
			if definition != nil {
				result[v.Name] = *definition
			}
		}
	}
	return result
}

// ConsumesFor gets the mediatypes for the operation
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {

	if len(operation.Consumes) == 0 {
		cons := make(map[string]struct{}, len(s.spec.Consumes))
		for _, k := range s.spec.Consumes {
			cons[k] = struct{}{}
		}
		return s.structMapKeys(cons)
	}

	cons := make(map[string]struct{}, len(operation.Consumes))
	for _, c := range operation.Consumes {
		cons[c] = struct{}{}
	}
	return s.structMapKeys(cons)
}

// ProducesFor gets the mediatypes for the operation
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
	if len(operation.Produces) == 0 {
		prod := make(map[string]struct{}, len(s.spec.Produces))
		for _, k := range s.spec.Produces {
			prod[k] = struct{}{}
		}
		return s.structMapKeys(prod)
	}

	prod := make(map[string]struct{}, len(operation.Produces))
	for _, c := range operation.Produces {
		prod[c] = struct{}{}
	}
	return s.structMapKeys(prod)
}

func mapKeyFromParam(param *spec.Parameter) string {
	return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
}

func fieldNameFromParam(param *spec.Parameter) string {
	if nm, ok := param.Extensions.GetString("go-name"); ok {
		return nm
	}
	return swag.ToGoName(param.Name)
}

func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
	for _, param := range parameters {
		pr := param
		if pr.Ref.String() != "" {
			obj, _, err := pr.Ref.GetPointer().Get(s.spec)
			if err != nil {
				panic(err)
			}
			pr = obj.(spec.Parameter)
		}
		res[mapKeyFromParam(&pr)] = pr
	}
}

// ParametersFor the specified operation id
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
	gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
		bag := make(map[string]spec.Parameter)
		s.paramsAsMap(pi.Parameters, bag)
		s.paramsAsMap(op.Parameters, bag)

		var res []spec.Parameter
		for _, v := range bag {
			res = append(res, v)
		}
		return res
	}
	for _, pi := range s.spec.Paths.Paths {
		if pi.Get != nil && pi.Get.ID == operationID {
			return gatherParams(&pi, pi.Get)
		}
		if pi.Head != nil && pi.Head.ID == operationID {
			return gatherParams(&pi, pi.Head)
		}
		if pi.Options != nil && pi.Options.ID == operationID {
			return gatherParams(&pi, pi.Options)
		}
		if pi.Post != nil && pi.Post.ID == operationID {
			return gatherParams(&pi, pi.Post)
		}
		if pi.Patch != nil && pi.Patch.ID == operationID {
			return gatherParams(&pi, pi.Patch)
		}
		if pi.Put != nil && pi.Put.ID == operationID {
			return gatherParams(&pi, pi.Put)
		}
		if pi.Delete != nil && pi.Delete.ID == operationID {
			return gatherParams(&pi, pi.Delete)
		}
	}
	return nil
}

// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
// apply for the method and path.
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
	res := make(map[string]spec.Parameter)
	if pi, ok := s.spec.Paths.Paths[path]; ok {
		s.paramsAsMap(pi.Parameters, res)
		s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
	}
	return res
}

// OperationForName gets the operation for the given id
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
	for method, pathItem := range s.operations {
		for path, op := range pathItem {
			if operationID == op.ID {
				return method, path, op, true
			}
		}
	}
	return "", "", nil, false
}

// OperationFor the given method and path
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
	if mp, ok := s.operations[strings.ToUpper(method)]; ok {
		op, fn := mp[path]
		return op, fn
	}
	return nil, false
}

// Operations gathers all the operations specified in the spec document
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
	return s.operations
}

func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
	if len(mp) == 0 {
		return nil
	}

	result := make([]string, 0, len(mp))
	for k := range mp {
		result = append(result, k)
	}
	return result
}

// AllPaths returns all the paths in the swagger spec
func (s *Spec) AllPaths() map[string]spec.PathItem {
	if s.spec == nil || s.spec.Paths == nil {
		return nil
	}
	return s.spec.Paths.Paths
}

// OperationIDs gets all the operation ids based on method and path
func (s *Spec) OperationIDs() []string {
	if len(s.operations) == 0 {
		return nil
	}
	result := make([]string, 0, len(s.operations))
	for method, v := range s.operations {
		for p, o := range v {
			if o.ID != "" {
				result = append(result, o.ID)
			} else {
				result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
			}
		}
	}
	return result
}

// RequiredConsumes gets all the distinct consumes that are specified in the specification document
func (s *Spec) RequiredConsumes() []string {
	return s.structMapKeys(s.consumes)
}

// RequiredProduces gets all the distinct produces that are specified in the specification document
func (s *Spec) RequiredProduces() []string {
	return s.structMapKeys(s.produces)
}

// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
func (s *Spec) RequiredSecuritySchemes() []string {
	return s.structMapKeys(s.authSchemes)
}

// SchemaRef is a reference to a schema
type SchemaRef struct {
	Name   string
	Ref    spec.Ref
	Schema *spec.Schema
}

// SchemasWithAllOf returns schema references to all schemas that are defined
// with an allOf key
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
	for _, v := range s.allOfs {
		result = append(result, v)
	}
	return
}

// AllDefinitions returns schema references for all the definitions that were discovered
func (s *Spec) AllDefinitions() (result []SchemaRef) {
	for _, v := range s.allSchemas {
		result = append(result, v)
	}
	return
}

// AllDefinitionReferences returns json refs for all the discovered schemas
func (s *Spec) AllDefinitionReferences() (result []string) {
	for _, v := range s.references.schemas {
		result = append(result, v.String())
	}
	return
}

// AllParameterReferences returns json refs for all the discovered parameters
func (s *Spec) AllParameterReferences() (result []string) {
	for _, v := range s.references.parameters {
		result = append(result, v.String())
	}
	return
}

// AllResponseReferences returns json refs for all the discovered responses
func (s *Spec) AllResponseReferences() (result []string) {
	for _, v := range s.references.responses {
		result = append(result, v.String())
	}
	return
}

// AllItemsReferences returns the references for all the items
func (s *Spec) AllItemsReferences() (result []string) {
	for _, v := range s.references.items {
		result = append(result, v.String())
	}
	return
}

// AllReferences returns all the references found in the document
func (s *Spec) AllReferences() (result []string) {
	for _, v := range s.references.allRefs {
		result = append(result, v.String())
	}
	return
}

// AllRefs returns all the unique references found in the document
func (s *Spec) AllRefs() (result []spec.Ref) {
	set := make(map[string]struct{})
	for _, v := range s.references.allRefs {
		a := v.String()
		if a == "" {
			continue
		}
		if _, ok := set[a]; !ok {
			set[a] = struct{}{}
			result = append(result, v)
		}
	}
	return
}
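
For orientation, here is a small sketch of the indices the vendored analyzer above exposes. The inline swagger document, the listPets operation id, and the "/pets" path are made-up illustrations, not part of the vendored code; only the analysis and spec packages shown in this diff are assumed.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// A made-up minimal swagger 2.0 document.
	raw := []byte(`{
		"swagger": "2.0",
		"info": {"title": "minimal", "version": "1.0"},
		"paths": {"/pets": {"get": {
			"operationId": "listPets",
			"responses": {"200": {"description": "ok"}}
		}}}
	}`)

	swspec := new(spec.Swagger)
	if err := json.Unmarshal(raw, swspec); err != nil {
		panic(err)
	}

	// New builds the operation, consumes/produces, and reference indices.
	a := analysis.New(swspec)
	fmt.Println(a.OperationIDs()) // [listPets]
	if op, ok := a.OperationFor("GET", "/pets"); ok {
		fmt.Println(op.ID) // listPets
	}
}
```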

@ -0,0 +1,39 @@
clone:
  path: github.com/go-openapi/loads

matrix:
  GO_VERSION:
    - "1.6"

build:
  integration:
    image: golang:$$GO_VERSION
    pull: true
    environment:
      GOCOVMODE: "count"
    commands:
      - go get -u github.com/axw/gocov/gocov
      - go get -u gopkg.in/matm/v1/gocov-html
      - go get -u github.com/cee-dub/go-junit-report
      - go get -u github.com/stretchr/testify/assert
      - go get -u gopkg.in/yaml.v2
      - go get -u github.com/go-openapi/swag
      - go get -u github.com/go-openapi/analysis
      - go get -u github.com/go-openapi/spec
      - ./hack/build-drone.sh

notify:
  slack:
    channel: bots
    webhook_url: $$SLACK_URL
    username: drone

publish:
  coverage:
    server: https://coverage.vmware.run
    token: $$GITHUB_TOKEN
    # threshold: 70
    # must_increase: true
    when:
      matrix:
        GO_VERSION: "1.6"

@ -0,0 +1,4 @@
secrets.yml
coverage.out
profile.cov
profile.out

13 cluster-autoscaler/vendor/github.com/go-openapi/loads/.pullapprove.yml generated vendored Normal file
@ -0,0 +1,13 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
  members:
    - casualjim
    - chancez
    - frapposelli
    - vburenin
    - pytlesk4
  name: pullapprove
  required: 1

74 cluster-autoscaler/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md generated vendored Normal file
@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

@ -0,0 +1,5 @@
# Loads OAI specs [](https://ci.vmware.run/go-openapi/loads) [](https://coverage.vmware.run/go-openapi/loads) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)

Loading of OAI specification documents from local or remote locations.

@ -0,0 +1,203 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package loads

import (
	"encoding/json"
	"fmt"
	"net/url"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
	"github.com/go-openapi/swag"
)

// JSONDoc loads a json document from either a file or a remote url
func JSONDoc(path string) (json.RawMessage, error) {
	data, err := swag.LoadFromFileOrHTTP(path)
	if err != nil {
		return nil, err
	}
	return json.RawMessage(data), nil
}

// DocLoader represents a doc loader type
type DocLoader func(string) (json.RawMessage, error)

// DocMatcher represents a predicate to check if a loader matches
type DocMatcher func(string) bool

var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}

// AddLoader for a document
func AddLoader(predicate DocMatcher, load DocLoader) {
	prev := loaders
	loaders = &loader{
		Match: predicate,
		Fn:    load,
		Next:  prev,
	}

}

type loader struct {
	Fn    DocLoader
	Match DocMatcher
	Next  *loader
}

// JSONSpec loads a spec from a json document
func JSONSpec(path string) (*Document, error) {
	data, err := JSONDoc(path)
	if err != nil {
		return nil, err
	}
	// convert to json
	return Analyzed(json.RawMessage(data), "")
}

// Document represents a swagger spec document
type Document struct {
	// specAnalyzer
	Analyzer *analysis.Spec
	spec     *spec.Swagger
	origSpec *spec.Swagger
	schema   *spec.Schema
	raw      json.RawMessage
}

// Spec loads a new spec document
func Spec(path string) (*Document, error) {
	specURL, err := url.Parse(path)
	if err != nil {
		return nil, err
	}
	for l := loaders.Next; l != nil; l = l.Next {
		if loaders.Match(specURL.Path) {
			b, err2 := loaders.Fn(path)
			if err2 != nil {
				return nil, err2
			}
			return Analyzed(b, "")
		}
	}
	b, err := loaders.Fn(path)
	if err != nil {
		return nil, err
	}
	return Analyzed(b, "")
}

var swag20Schema = spec.MustLoadSwagger20Schema()

// Analyzed creates a new analyzed spec document
func Analyzed(data json.RawMessage, version string) (*Document, error) {
	if version == "" {
		version = "2.0"
	}
	if version != "2.0" {
		return nil, fmt.Errorf("spec version %q is not supported", version)
	}

	swspec := new(spec.Swagger)
	if err := json.Unmarshal(data, swspec); err != nil {
		return nil, err
	}

	origsqspec := new(spec.Swagger)
	if err := json.Unmarshal(data, origsqspec); err != nil {
		return nil, err
	}

	d := &Document{
		Analyzer: analysis.New(swspec),
		schema:   swag20Schema,
		spec:     swspec,
		raw:      data,
		origSpec: origsqspec,
	}
	return d, nil
}

// Expanded expands the ref fields in the spec document and returns a new spec document
func (d *Document) Expanded() (*Document, error) {
	swspec := new(spec.Swagger)
	if err := json.Unmarshal(d.raw, swspec); err != nil {
		return nil, err
	}
	if err := spec.ExpandSpec(swspec); err != nil {
		return nil, err
	}

	dd := &Document{
		Analyzer: analysis.New(swspec),
		spec:     swspec,
		schema:   swag20Schema,
		raw:      d.raw,
		origSpec: d.origSpec,
	}
	return dd, nil
}

// BasePath the base path for this spec
func (d *Document) BasePath() string {
	return d.spec.BasePath
}

// Version returns the version of this spec
func (d *Document) Version() string {
	return d.spec.Swagger
}

// Schema returns the swagger 2.0 schema
func (d *Document) Schema() *spec.Schema {
	return d.schema
}

// Spec returns the swagger spec object model
func (d *Document) Spec() *spec.Swagger {
	return d.spec
}

// Host returns the host for the API
func (d *Document) Host() string {
	return d.spec.Host
}

// Raw returns the raw swagger spec as json bytes
func (d *Document) Raw() json.RawMessage {
	return d.raw
}

func (d *Document) OrigSpec() *spec.Swagger {
	return d.origSpec
}

// ResetDefinitions gives a shallow copy with the models reset
func (d *Document) ResetDefinitions() *Document {
	defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
	for k, v := range d.origSpec.Definitions {
		defs[k] = v
	}

	d.spec.Definitions = defs
	return d
}

// Pristine creates a new pristine document instance based on the input data
func (d *Document) Pristine() *Document {
	dd, _ := Analyzed(d.Raw(), d.Version())
	return dd
}
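
A short sketch of how the loader above is typically driven; the "swagger.json" path is a placeholder, and the calls simply chain the Document accessors defined in this file together with the analyzer shown earlier.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/loads"
)

func main() {
	// Load from a local file or URL; "swagger.json" is a placeholder path.
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("swagger version:", doc.Version(), "host:", doc.Host())

	// Expanded resolves $ref pointers and re-analyzes the result.
	expanded, err := doc.Expanded()
	if err != nil {
		panic(err)
	}
	fmt.Println("operations:", expanded.Analyzer.OperationIDs())
}
```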

@ -0,0 +1,14 @@
# This is the official list of GoGo authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS file, which
# lists people. For example, employees are listed in CONTRIBUTORS,
# but not in AUTHORS, because the employer holds the copyright.

# Names should be added to this file as one of
#     Organization's name
#     Individual's name <submission email address>
#     Individual's name <submission email address> <email2> <emailN>

# Please keep the list sorted.

Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>

@ -5,9 +5,12 @@ DongYun Kang <ceram1000@gmail.com>
Dwayne Schultz <dschultz@pivotal.io>
Georg Apitz <gapitz@pivotal.io>
Gustav Paul <gustav.paul@gmail.com>
Johan Brandhorst <johan.brandhorst@gmail.com>
John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>

@ -1,7 +1,7 @@
Extensions for Protocol Buffers to create more go like structures.
Protocol Buffers for Go with Gadgets

Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
http://github.com/gogo/protobuf/gogoproto
Copyright (c) 2013, The GoGo Authors. All rights reserved.
http://github.com/gogo/protobuf

Go support for Protocol Buffers - Google's data interchange format


@ -39,5 +39,5 @@ test: install generate-test-pbs
generate-test-pbs:
	make install
	make -C testdata
	protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto
	protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto
	make

@ -84,14 +84,20 @@ func mergeStruct(out, in reflect.Value) {
		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
	}

	if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
		emOut := out.Addr().Interface().(extensionsMap)
		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
	} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
		emOut := out.Addr().Interface().(extensionsBytes)
		bIn := emIn.GetExtensions()
		bOut := emOut.GetExtensions()
		*bOut = append(*bOut, *bIn...)
	} else if emIn, ok := extendable(in.Addr().Interface()); ok {
		emOut, _ := extendable(out.Addr().Interface())
		mIn, muIn := emIn.extensionsRead()
		if mIn != nil {
			mOut := emOut.extensionsWrite()
			muIn.Lock()
			mergeExtension(mOut, mIn)
			muIn.Unlock()
		}
	}

	uf := in.FieldByName("XXX_unrecognized")

@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	// x, n already 0
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0

@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
	return 0, 0
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	// x, err already 0

func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	i := p.index
	l := len(p.buf)

@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
// x -= 0x80 << 63 // Always zero.
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
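For reference, the unrolled fast path added above decodes the standard base-128 varint wire format, one 7-bit group per byte with the high bit marking continuation. The sketch below is a standalone illustration of the same format using the plain loop shape of decodeVarintSlow; it is not part of the vendored package.

// Illustrative sketch: base-128 varint decoding with the simple loop form.
package main

import "fmt"

func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7f) << shift // low 7 bits are payload
		if b&0x80 == 0 {
			return x, n // high bit clear: this was the last byte
		}
	}
	return 0, 0 // overflow: more than 10 continuation bytes
}

func main() {
	// 300 encodes as 0xAC 0x02: payload 0x2C plus 0x02<<7.
	x, n := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}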
|
||||
|
|
@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
|
|||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
|
|
@ -378,6 +474,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
|||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field.
|
||||
// (See below.)
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
|
|
@ -390,16 +491,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
|||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
if ee, eok := e.(extensionsMap); eok {
|
||||
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
ee.ExtensionMap()[int32(tag)] = ext
|
||||
} else if ee, eok := e.(extensionsBytes); eok {
|
||||
ext := ee.GetExtensions()
|
||||
if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
|
||||
if isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.GetExtensions()
|
||||
*ext = append(*ext, o.buf[oi:o.index]...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
extmap := e.extensionsWrite()
|
||||
ext := extmap[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
extmap[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,7 @@
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
// http://github.com/gogo/protobuf/gogoproto
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@ -96,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) {
|
|||
if v == nil {
|
||||
return
|
||||
}
|
||||
structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
|
||||
structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v)))
|
||||
}
|
||||
|
||||
func setCustomType(base structPointer, f field, value interface{}) {
|
||||
|
|
@ -163,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error
|
|||
}
|
||||
newBas := appendStructPointer(base, p.field, p.ctype)
|
||||
|
||||
setCustomType(newBas, 0, custom)
|
||||
var zero field
|
||||
setCustomType(newBas, zero, custom)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
100 cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/duration.go generated vendored Normal file
|
|
@ -0,0 +1,100 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Range of a Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid Duration
|
||||
// may still be too large to fit into a time.Duration (the range of Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||
func validateDuration(d *duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %#v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %#v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
|
||||
// returns an error if the Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func durationFromProto(p *duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos)
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a Duration.
|
||||
func durationProto(d time.Duration) *duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &duration{
|
||||
Seconds: secs,
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
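The new duration.go converts between google.protobuf.Duration's (seconds, nanos) pair and Go's time.Duration. The sketch below is an illustrative, standard-library-only version of that arithmetic; the helper names are made up for the example, and it skips the range checks that validateDuration performs above.

// Illustrative sketch: splitting a time.Duration into (seconds, nanos) and
// recombining it, mirroring durationProto and durationFromProto.
package main

import (
	"fmt"
	"time"
)

func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
	n := d.Nanoseconds()
	secs = n / 1e9
	nanos = int32(n - secs*1e9)
	return secs, nanos
}

func fromSecondsNanos(secs int64, nanos int32) time.Duration {
	return time.Duration(secs)*time.Second + time.Duration(nanos)
}

func main() {
	d := 90*time.Second + 500*time.Millisecond
	s, ns := toSecondsNanos(d)
	fmt.Println(s, ns)                   // 90 500000000
	fmt.Println(fromSecondsNanos(s, ns)) // 1m30.5s
}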
|
||||
203 cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/duration_gogo.go generated vendored Normal file
|
|
@ -0,0 +1,203 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
|
||||
|
||||
type duration struct {
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *duration) Reset() { *m = duration{} }
|
||||
func (*duration) ProtoMessage() {}
|
||||
func (*duration) String() string { return "duration<string>" }
|
||||
|
||||
func init() {
|
||||
RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
|
||||
}
|
||||
|
||||
func (o *Buffer) decDuration() (time.Duration, error) {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dproto := &duration{}
|
||||
if err := Unmarshal(b, dproto); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return durationFromProto(dproto)
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
|
||||
d, err := o.decDuration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
|
||||
d, err := o.decDuration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
|
||||
d, err := o.decDuration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
|
||||
var zero field
|
||||
setPtrCustomType(newBas, zero, &d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
|
||||
d, err := o.decDuration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word64Slice(base, p.field).Append(uint64(d))
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_duration(p *Properties, base structPointer) (n int) {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return 0
|
||||
}
|
||||
dur := structPointer_Interface(structp, durationType).(*time.Duration)
|
||||
d := durationProto(*dur)
|
||||
size := Size(d)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return ErrNil
|
||||
}
|
||||
dur := structPointer_Interface(structp, durationType).(*time.Duration)
|
||||
d := durationProto(*dur)
|
||||
data, err := Marshal(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_ref_duration(p *Properties, base structPointer) (n int) {
|
||||
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
|
||||
d := durationProto(*dur)
|
||||
size := Size(d)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
|
||||
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
|
||||
d := durationProto(*dur)
|
||||
data, err := Marshal(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_duration(p *Properties, base structPointer) (n int) {
|
||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
|
||||
durs := *pdurs
|
||||
for i := 0; i < len(durs); i++ {
|
||||
if durs[i] == nil {
|
||||
return 0
|
||||
}
|
||||
dproto := durationProto(*durs[i])
|
||||
size := Size(dproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
|
||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
|
||||
durs := *pdurs
|
||||
for i := 0; i < len(durs); i++ {
|
||||
if durs[i] == nil {
|
||||
return errRepeatedHasNil
|
||||
}
|
||||
dproto := durationProto(*durs[i])
|
||||
data, err := Marshal(dproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
|
||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
|
||||
durs := *pdurs
|
||||
for i := 0; i < len(durs); i++ {
|
||||
dproto := durationProto(durs[i])
|
||||
size := Size(dproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
|
||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
|
||||
durs := *pdurs
|
||||
for i := 0; i < len(durs); i++ {
|
||||
dproto := durationProto(durs[i])
|
||||
data, err := Marshal(dproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@@ -70,6 +70,10 @@ var (

	// ErrNil is the error returned if Marshal is called with nil.
	ErrNil = errors.New("proto: Marshal called with nil")

	// ErrTooLarge is the error returned if Marshal is called with a
	// message that encodes to >2GB.
	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)

// The fundamental encoders that put bytes on the wire.
@@ -78,6 +82,10 @@

const maxVarintBytes = 10 // maximum length of a varint

// maxMarshalSize is the largest allowed size of an encoded protobuf,
// since C++ and Java use signed int32s for the size.
const maxMarshalSize = 1<<31 - 1

// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
@ -226,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
|
|||
}
|
||||
p := NewBuffer(nil)
|
||||
err := p.Marshal(pb)
|
||||
var state errorState
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
return nil, err
|
||||
}
|
||||
if p.buf == nil && err == nil {
|
||||
// Return a non-nil slice on success.
|
||||
return []byte{}, nil
|
||||
|
|
@ -258,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
|
|||
// Can the object marshal itself?
|
||||
if m, ok := pb.(Marshaler); ok {
|
||||
data, err := m.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.buf = append(p.buf, data...)
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
t, base, err := getbase(pb)
|
||||
|
|
@@ -274,9 +275,12 @@ func (p *Buffer) Marshal(pb Message) error {
	}

	if collectStats {
		stats.Encode++
		(stats).Encode++ // Parens are to work around a goimports bug.
	}

	if len(p.buf) > maxMarshalSize {
		return ErrTooLarge
	}
	return err
}
@@ -298,7 +302,7 @@ func Size(pb Message) (n int) {
	}

	if collectStats {
		stats.Size++
		(stats).Size++ // Parens are to work around a goimports bug.
	}

	return
@ -1003,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
|
|||
if p.isMarshaler {
|
||||
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
||||
data, _ := m.Marshal()
|
||||
n += len(p.tagcode)
|
||||
n += sizeRawBytes(data)
|
||||
continue
|
||||
}
|
||||
|
|
@ -1062,10 +1065,32 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
|
|||
|
||||
// Encode an extension map.
|
||||
func (o *Buffer) enc_map(p *Properties, base structPointer) error {
|
||||
v := *structPointer_ExtMap(base, p.field)
|
||||
if err := encodeExtensionMap(v); err != nil {
|
||||
exts := structPointer_ExtMap(base, p.field)
|
||||
if err := encodeExtensionsMap(*exts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.enc_map_body(*exts)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
|
||||
exts := structPointer_Extensions(base, p.field)
|
||||
|
||||
v, mu := exts.extensionsRead()
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if err := encodeExtensionsMap(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.enc_map_body(v)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_map_body(v map[int32]Extension) error {
|
||||
// Fast-path for common cases: zero or one extensions.
|
||||
if len(v) <= 1 {
|
||||
for _, e := range v {
|
||||
|
|
@ -1088,8 +1113,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
|
|||
}
|
||||
|
||||
func size_map(p *Properties, base structPointer) int {
|
||||
v := *structPointer_ExtMap(base, p.field)
|
||||
return sizeExtensionMap(v)
|
||||
v := structPointer_ExtMap(base, p.field)
|
||||
return extensionsMapSize(*v)
|
||||
}
|
||||
|
||||
func size_exts(p *Properties, base structPointer) int {
|
||||
v := structPointer_Extensions(base, p.field)
|
||||
return extensionsSize(v)
|
||||
}
|
||||
|
||||
// Encode a map field.
|
||||
|
|
@ -1118,7 +1148,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
|
|||
if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
|
||||
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
@ -1128,11 +1158,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
|
|||
for _, key := range v.MapKeys() {
|
||||
val := v.MapIndex(key)
|
||||
|
||||
// The only illegal map entry values are nil message pointers.
|
||||
if val.Kind() == reflect.Ptr && val.IsNil() {
|
||||
return errors.New("proto: map has nil element")
|
||||
}
|
||||
|
||||
keycopy.Set(key)
|
||||
valcopy.Set(val)
|
||||
|
||||
|
|
@ -1220,6 +1245,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if len(o.buf) > maxMarshalSize {
|
||||
return ErrTooLarge
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1236,6 +1264,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
|
|||
// Add unrecognized fields at the end.
|
||||
if prop.unrecField.IsValid() {
|
||||
v := *structPointer_Bytes(base, prop.unrecField)
|
||||
if len(o.buf)+len(v) > maxMarshalSize {
|
||||
return ErrTooLarge
|
||||
}
|
||||
if len(v) > 0 {
|
||||
o.buf = append(o.buf, v...)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
// Extensions for Protocol Buffers to create more go like structures.
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
// http://github.com/gogo/protobuf/gogoproto
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Go support for Protocol Buffers - Google's data interchange format
//
@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int {
|
|||
// Encode a slice of references to message struct pointers ([]struct).
|
||||
func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
|
||||
var state errorState
|
||||
ss := structPointer_GetStructPointer(base, p.field)
|
||||
ss1 := structPointer_GetRefStructPointer(ss, field(0))
|
||||
size := p.stype.Size()
|
||||
l := structPointer_Len(base, p.field)
|
||||
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
|
||||
l := ss.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
structp := structPointer_Add(ss1, field(uintptr(i)*size))
|
||||
structp := ss.Index(i)
|
||||
if structPointer_IsNil(structp) {
|
||||
return errRepeatedHasNil
|
||||
}
|
||||
|
|
@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer)
|
|||
|
||||
//TODO this is only copied, please fix this
|
||||
func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
|
||||
ss := structPointer_GetStructPointer(base, p.field)
|
||||
ss1 := structPointer_GetRefStructPointer(ss, field(0))
|
||||
size := p.stype.Size()
|
||||
l := structPointer_Len(base, p.field)
|
||||
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
|
||||
l := ss.Len()
|
||||
n += l * len(p.tagcode)
|
||||
for i := 0; i < l; i++ {
|
||||
structp := structPointer_Add(ss1, field(uintptr(i)*size))
|
||||
structp := ss.Index(i)
|
||||
if structPointer_IsNil(structp) {
|
||||
return // return the size up to this point
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -54,13 +54,17 @@ Equality is defined in this way:
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal (a "bytes" field,
    although represented by []byte, is not a repeated field)
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
|
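One consequence of the rules above: a "bytes" field is compared as a single scalar, and for proto3 a nil slice equals an empty one. The helper below is an illustrative restatement of that rule and is not part of the package; bytes.Equal already treats nil and empty as equal, the explicit check just spells the rule out.

// Illustrative sketch: the proto3 bytes-field comparison rule stated above.
package main

import (
	"bytes"
	"fmt"
)

func bytesFieldEqual(a, b []byte) bool {
	if len(a) == 0 && len(b) == 0 {
		return true // nil == {} for proto3 bytes fields
	}
	return bytes.Equal(a, b) // otherwise compare contents as one scalar
}

func main() {
	fmt.Println(bytesFieldEqual(nil, []byte{}))             // true
	fmt.Println(bytesFieldEqual([]byte("a"), []byte("a")))  // true
	fmt.Println(bytesFieldEqual([]byte("a"), nil))          // false
}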
@ -121,9 +125,16 @@ func equalStruct(v1, v2 reflect.Value) bool {
|
|||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
@ -184,6 +195,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
|||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
|
|
@ -223,8 +241,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
|||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// em1 and em2 are extension maps.
|
||||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -52,23 +52,112 @@ type ExtensionRange struct {
|
|||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
type extensionsMap interface {
|
||||
extendableProto
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
type extensionsBytes interface {
|
||||
extendableProto
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
GetExtensions() *[]byte
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, bool) {
|
||||
if ep, ok := p.(extendableProto); ok {
|
||||
return ep, ok
|
||||
}
|
||||
if ep, ok := p.(extendableProtoV1); ok {
|
||||
return extensionAdapter{ep}, ok
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
type extensionRange interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
|
||||
var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
|
||||
var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
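XXX_InternalExtensions above lazily allocates a mutex-guarded extension map: writers create it on first use, while readers may get nil and must hold the returned lock while iterating. The sketch below reproduces that read/write split with a plain string map; it is illustrative only and not part of the diff.

// Illustrative sketch: lazily initialized, mutex-guarded map, mirroring
// extensionsWrite/extensionsRead above.
package main

import (
	"fmt"
	"sync"
)

type lazyMap struct {
	p *struct {
		mu sync.Mutex
		m  map[int32]string
	}
}

// write returns the map, creating it on first use.
func (l *lazyMap) write() map[int32]string {
	if l.p == nil {
		l.p = new(struct {
			mu sync.Mutex
			m  map[int32]string
		})
		l.p.m = make(map[int32]string)
	}
	return l.p.m
}

// read returns the map (possibly nil) and the lock that guards it.
func (l *lazyMap) read() (map[int32]string, sync.Locker) {
	if l.p == nil {
		return nil, nil
	}
	return l.p.m, &l.p.mu
}

func main() {
	var l lazyMap
	if m, _ := l.read(); m == nil {
		fmt.Println("empty, nothing allocated yet")
	}
	l.write()[5] = "ext"
	m, mu := l.read()
	mu.Lock()
	fmt.Println(m[5]) // ext
	mu.Unlock()
}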
|
||||
|
|
@ -78,6 +167,7 @@ type ExtensionDesc struct {
|
|||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
|
|
@ -101,20 +191,23 @@ type Extension struct {
|
|||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
if ebase, ok := base.(extensionsMap); ok {
|
||||
ebase.ExtensionMap()[id] = Extension{enc: b}
|
||||
} else if ebase, ok := base.(extensionsBytes); ok {
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
if ebase, ok := base.(extensionsBytes); ok {
|
||||
clearExtension(base, id)
|
||||
ext := ebase.GetExtensions()
|
||||
*ext = append(*ext, b...)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
return
|
||||
}
|
||||
epb, ok := extendable(base)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
func isExtensionField(pb extensionRange, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
|
|
@ -125,8 +218,12 @@ func isExtensionField(pb extendableProto, field int32) bool {
|
|||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
}
|
||||
// Check the range.
|
||||
|
|
@ -172,43 +269,57 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
|
|||
return prop
|
||||
}
|
||||
|
||||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||
func encodeExtensionMap(m map[int32]Extension) error {
|
||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||
func encodeExtensions(e *XXX_InternalExtensions) error {
|
||||
m, mu := e.extensionsRead()
|
||||
if m == nil {
|
||||
return nil // fast path
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return encodeExtensionsMap(m)
|
||||
}
|
||||
|
||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||
func encodeExtensionsMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
err := encodeExtension(&e)
|
||||
if err != nil {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeExtension(e *Extension) error {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
return nil
|
||||
func extensionsSize(e *XXX_InternalExtensions) (n int) {
|
||||
m, mu := e.extensionsRead()
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
return nil
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return extensionsMapSize(m)
|
||||
}
|
||||
|
||||
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
func extensionsMapSize(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
|
|
@ -233,12 +344,8 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
|
|||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
_, ok := epb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
if epb, doki := pb.(extensionsBytes); doki {
|
||||
ext := epb.GetExtensions()
|
||||
buf := *ext
|
||||
o := 0
|
||||
|
|
@ -258,7 +365,19 @@ func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
panic("unreachable")
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok = extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
||||
|
|
@ -281,64 +400,32 @@ func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
|||
return -1
|
||||
}
|
||||
|
||||
func clearExtension(pb extendableProto, fieldNum int32) {
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
delete(epb.ExtensionMap(), fieldNum)
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
clearExtension(pb, extension.Field)
|
||||
}
|
||||
|
||||
func clearExtension(pb Message, fieldNum int32) {
|
||||
if epb, doki := pb.(extensionsBytes); doki {
|
||||
offset := 0
|
||||
for offset != -1 {
|
||||
offset = deleteExtension(epb, fieldNum, offset)
|
||||
}
|
||||
} else {
|
||||
panic("unreachable")
|
||||
return
|
||||
}
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
clearExtension(pb, extension.Field)
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, fieldNum)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present it returns ErrMissingExtension.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
emap := epb.ExtensionMap()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
if epb, doki := pb.(extensionsBytes); doki {
|
||||
ext := epb.GetExtensions()
|
||||
o := 0
|
||||
for o < len(*ext) {
|
||||
|
|
@ -360,7 +447,50 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
|
|||
}
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
panic("unreachable")
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return nil, errors.New("proto: not an extendable proto")
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
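GetExtension above decodes the raw encoding on first access, caches the decoded value, and drops the bytes so the caller may safely mutate what it gets back. The sketch below shows the same decode-once-and-cache idea with JSON standing in for the protobuf codec; the type and method names are assumptions for the example.

// Illustrative sketch: keep the raw encoding until first access, then cache
// the decoded value and drop the bytes.
package main

import (
	"encoding/json"
	"fmt"
)

type lazyValue struct {
	enc   []byte      // raw encoding, nil once decoded
	value interface{} // decoded form, nil until first access
}

func (l *lazyValue) get() (interface{}, error) {
	if l.value != nil {
		return l.value, nil // already decoded
	}
	var v map[string]int
	if err := json.Unmarshal(l.enc, &v); err != nil {
		return nil, err
	}
	l.value = v
	l.enc = nil // drop the encoded form; the cached value is now canonical
	return l.value, nil
}

func main() {
	l := &lazyValue{enc: []byte(`{"field": 7}`)}
	v, _ := l.get()
	fmt.Println(v) // map[field:7]
}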
|
||||
|
|
@ -434,14 +564,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
|||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := pb.(extendableProto)
|
||||
if !ok {
|
||||
err = errors.New("proto: not an extendable proto")
|
||||
return
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
extensions[i], err = GetExtension(pb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
|
|
@ -452,9 +577,58 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
|
|||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
if epb, doki := pb.(extensionsBytes); doki {
|
||||
ClearExtension(pb, extension)
|
||||
ext := epb.GetExtensions()
|
||||
et := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
p := NewBuffer(nil)
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
*ext = append(*ext, p.buf...)
|
||||
return nil
|
||||
}
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return errors.New("proto: not an extendable proto")
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
|
|
@ -469,26 +643,27 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
|
|||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
return setExtension(pb, extension, value)
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
ClearExtension(pb, extension)
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
if epb, doki := pb.(extensionsBytes); doki {
|
||||
ext := epb.GetExtensions()
|
||||
et := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
p := NewBuffer(nil)
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
*ext = append(*ext, p.buf...)
|
||||
*ext = []byte{}
|
||||
return
|
||||
}
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,7 @@
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
// http://github.com/gogo/protobuf/gogoproto
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@ -33,9 +35,10 @@ import (
|
|||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
|
||||
func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
|
||||
if reflect.ValueOf(pb).IsNil() {
|
||||
return ifnotset
|
||||
}
|
||||
|
|
@ -60,8 +63,12 @@ func (this *Extension) Compare(that *Extension) int {
|
|||
return bytes.Compare(this.enc, that.enc)
|
||||
}
|
||||
|
||||
func SizeOfInternalExtension(m extendableProto) (n int) {
|
||||
return SizeOfExtensionMap(m.extensionsWrite())
|
||||
}
|
||||
|
||||
func SizeOfExtensionMap(m map[int32]Extension) (n int) {
|
||||
return sizeExtensionMap(m)
|
||||
return extensionsMapSize(m)
|
||||
}
|
||||
|
||||
type sortableMapElem struct {
|
||||
|
|
@ -94,6 +101,10 @@ func (this sortableExtensions) String() string {
|
|||
return "map[" + strings.Join(ss, ",") + "]"
|
||||
}
|
||||
|
||||
func StringFromInternalExtension(m extendableProto) string {
|
||||
return StringFromExtensionsMap(m.extensionsWrite())
|
||||
}
|
||||
|
||||
func StringFromExtensionsMap(m map[int32]Extension) string {
|
||||
return newSortableExtensionsFromMap(m).String()
|
||||
}
|
||||
|
|
@ -106,8 +117,12 @@ func StringFromExtensionsBytes(ext []byte) string {
|
|||
return StringFromExtensionsMap(m)
|
||||
}
|
||||
|
||||
func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
|
||||
return EncodeExtensionMap(m.extensionsWrite(), data)
|
||||
}
|
||||
|
||||
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
if err := encodeExtensionsMap(m); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
keys := make([]int, 0, len(m))
|
||||
|
|
@ -125,7 +140,7 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
|
|||
if m[id].value == nil || m[id].desc == nil {
|
||||
return m[id].enc, nil
|
||||
}
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
if err := encodeExtensionsMap(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m[id].enc, nil
|
||||
|
|
@ -189,15 +204,42 @@ func NewExtension(e []byte) Extension {
|
|||
return ee
|
||||
}
|
||||
|
||||
func AppendExtension(e extendableProto, tag int32, buf []byte) {
|
||||
if ee, eok := e.(extensionsMap); eok {
|
||||
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, buf...)
|
||||
ee.ExtensionMap()[int32(tag)] = ext
|
||||
} else if ee, eok := e.(extensionsBytes); eok {
|
||||
func AppendExtension(e Message, tag int32, buf []byte) {
|
||||
if ee, eok := e.(extensionsBytes); eok {
|
||||
ext := ee.GetExtensions()
|
||||
*ext = append(*ext, buf...)
|
||||
return
|
||||
}
|
||||
if ee, eok := e.(extendableProto); eok {
|
||||
m := ee.extensionsWrite()
|
||||
ext := m[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, buf...)
|
||||
m[int32(tag)] = ext
|
||||
}
|
||||
}
|
||||
|
||||
func encodeExtension(e *Extension) error {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
return nil
|
||||
}
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this Extension) GoString() string {
|
||||
|
|
@ -209,7 +251,7 @@ func (this Extension) GoString() string {
|
|||
return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
|
||||
}
|
||||
|
||||
func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
|
||||
func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
|
||||
typ := reflect.TypeOf(pb).Elem()
|
||||
ext, ok := extensionMaps[typ]
|
||||
if !ok {
|
||||
|
|
@ -219,10 +261,10 @@ func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) e
|
|||
if !ok {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return setExtension(pb, desc, value)
|
||||
return SetExtension(pb, desc, value)
|
||||
}
|
||||
|
||||
func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
|
||||
func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
|
||||
typ := reflect.TypeOf(pb).Elem()
|
||||
ext, ok := extensionMaps[typ]
|
||||
if !ok {
|
||||
|
|
@ -234,3 +276,19 @@ func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error)
|
|||
}
|
||||
return GetExtension(pb, desc)
|
||||
}
|
||||
|
||||
func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
|
||||
x := &XXX_InternalExtensions{
|
||||
p: new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}),
|
||||
}
|
||||
x.p.extensionMap = m
|
||||
return *x
|
||||
}
|
||||
|
||||
func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
|
||||
pb := extendable.(extendableProto)
|
||||
return pb.extensionsWrite()
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
// temporary Buffer and are fine for most applications.
type Buffer struct {
	buf   []byte // encode/decode byte stream
	index int    // write point
	index int    // read point

	// pools of basic types to amortize allocation.
	bools []bool
@@ -889,6 +889,10 @@ func isProto3Zero(v reflect.Value) bool {
	return false
}

// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion2 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion1 = true
@@ -1,5 +1,7 @@
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
// http://github.com/gogo/protobuf/gogoproto
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@ -149,9 +149,21 @@ func skipVarint(buf []byte) []byte {
|
|||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return nil, err
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
if err := encodeExtensions(exts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m, _ = exts.extensionsRead()
|
||||
case map[int32]Extension:
|
||||
if err := encodeExtensionsMap(exts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
@ -178,7 +190,17 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
|||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
@ -209,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
|||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m, _ = exts.extensionsRead()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
@ -252,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
|||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
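Note on the API change above: MarshalMessageSet, UnmarshalMessageSet and their JSON variants now take an exts interface{} and dispatch on its concrete type, so generated code can pass either the new *XXX_InternalExtensions wrapper or the legacy map[int32]Extension. A minimal, self-contained sketch of that dispatch pattern follows; the types extension and internalExtensions are placeholders invented for illustration, not the real proto types.

package main

import (
	"errors"
	"fmt"
)

// Placeholder stand-ins for the proto package's internal extension types.
type extension struct{ enc []byte }
type internalExtensions struct{ m map[int32]extension }

// resolveExtensions mirrors the type switch used above: accept either the
// wrapper struct or a bare map, and reject anything else.
func resolveExtensions(exts interface{}) (map[int32]extension, error) {
	switch e := exts.(type) {
	case *internalExtensions:
		return e.m, nil
	case map[int32]extension:
		return e, nil
	default:
		return nil, errors.New("not an extension map")
	}
}

func main() {
	m := map[int32]extension{50000: {enc: []byte{0x0a}}}
	got, err := resolveExtensions(&internalExtensions{m: m})
	fmt.Println(len(got), err) // 1 <nil>
}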
@ -29,7 +29,7 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine
|
||||
// +build appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// Extensions returns the address of an extension map field in the struct.
|
||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
||||
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go (generated, vendored, new file, 85 lines)
@ -0,0 +1,85 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine js
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func structPointer_FieldPointer(p structPointer, f field) structPointer {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_Add(p structPointer, size field) structPointer {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_Len(p structPointer, f field) int {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
type structRefSlice struct{}
|
||||
|
||||
func (v *structRefSlice) Len() int {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (v *structRefSlice) Index(i int) structPointer {
|
||||
panic("not implemented")
|
||||
}
@ -29,7 +29,7 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine
|
||||
// +build !appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
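The build-constraint changes above add the js tag so that GopherJS builds, like App Engine builds, fall back to the reflect-based accessors instead of package unsafe. A hedged two-file sketch of that pattern with made-up file and package names (not the vendored ones):

// file accessors_reflect.go -- compiled for App Engine and GopherJS builds.
// +build appengine js

package accessors

func usesUnsafe() bool { return false }

// file accessors_unsafe.go -- compiled everywhere else.
// +build !appengine,!js

package accessors

func usesUnsafe() bool { return true }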
@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go (generated, vendored, 44 changed lines)
@ -1,5 +1,7 @@
|
|||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
@ -24,7 +26,7 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine
|
||||
// +build !appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
@ -70,16 +72,13 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
|
|||
|
||||
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
|
||||
size := typ.Elem().Size()
|
||||
oldHeader := structPointer_GetSliceHeader(base, f)
|
||||
newLen := oldHeader.Len + 1
|
||||
slice := reflect.MakeSlice(typ, newLen, newLen)
|
||||
bas := toStructPointer(slice)
|
||||
for i := 0; i < oldHeader.Len; i++ {
|
||||
newElemptr := uintptr(bas) + uintptr(i)*size
|
||||
oldElemptr := oldHeader.Data + uintptr(i)*size
|
||||
copyUintPtr(oldElemptr, newElemptr, int(size))
|
||||
}
|
||||
|
||||
oldHeader := structPointer_GetSliceHeader(base, f)
|
||||
oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
|
||||
newLen := oldHeader.Len + 1
|
||||
newSlice := reflect.MakeSlice(typ, newLen, newLen)
|
||||
reflect.Copy(newSlice, oldSlice)
|
||||
bas := toStructPointer(newSlice)
|
||||
oldHeader.Data = uintptr(bas)
|
||||
oldHeader.Len = newLen
|
||||
oldHeader.Cap = newLen
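The rewritten appendStructPointer above grows the slice with reflect.MakeSlice and reflect.Copy instead of copying elements through raw pointers. A standalone sketch of that reflect idiom on an ordinary []int (no structPointer involved):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	old := []int{1, 2, 3}
	typ := reflect.TypeOf(old) // []int

	// Make a slice one element longer and copy the old contents into it.
	newLen := len(old) + 1
	grown := reflect.MakeSlice(typ, newLen, newLen)
	reflect.Copy(grown, reflect.ValueOf(old))

	out := grown.Interface().([]int)
	out[newLen-1] = 4
	fmt.Println(out) // [1 2 3 4]
}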
@ -106,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer {
|
|||
func structPointer_Len(p structPointer, f field) int {
|
||||
return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
|
||||
}
|
||||
|
||||
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
|
||||
return &structRefSlice{p: p, f: f, size: size}
|
||||
}
|
||||
|
||||
// A structRefSlice represents a slice of structs (themselves submessages or groups).
|
||||
type structRefSlice struct {
|
||||
p structPointer
|
||||
f field
|
||||
size uintptr
|
||||
}
|
||||
|
||||
func (v *structRefSlice) Len() int {
|
||||
return structPointer_Len(v.p, v.f)
|
||||
}
|
||||
|
||||
func (v *structRefSlice) Index(i int) structPointer {
|
||||
ss := structPointer_GetStructPointer(v.p, v.f)
|
||||
ss1 := structPointer_GetRefStructPointer(ss, 0)
|
||||
return structPointer_Add(ss1, field(uintptr(i)*v.size))
|
||||
}
@ -1,7 +1,7 @@
|
|||
// Extensions for Protocol Buffers to create more go like structures.
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
@ -190,10 +190,11 @@ type Properties struct {
|
|||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
CustomType string
|
||||
def_uint64 uint64
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
CustomType string
|
||||
StdTime bool
|
||||
StdDuration bool
|
||||
|
||||
enc encoder
|
||||
valEnc valueEncoder // set for bool and numeric types only
@ -340,6 +341,10 @@ func (p *Properties) Parse(s string) {
|
|||
p.OrigName = strings.Split(f, "=")[1]
|
||||
case strings.HasPrefix(f, "customtype="):
|
||||
p.CustomType = strings.Split(f, "=")[1]
|
||||
case f == "stdtime":
|
||||
p.StdTime = true
|
||||
case f == "stdduration":
|
||||
p.StdDuration = true
|
||||
}
|
||||
}
|
||||
}
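The new cases above teach Properties.Parse the gogo-specific stdtime and stdduration options alongside customtype=. A small sketch of exercising the exported Properties type with a hand-written tag string (the tag text is illustrative, not copied from generated code):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	p := new(proto.Properties)
	// Roughly what a generated time.Time field's protobuf tag carries.
	p.Parse("bytes,1,opt,name=created_at,stdtime")
	fmt.Println(p.StdTime, p.StdDuration) // true false
}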
@ -355,11 +360,22 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||
p.enc = nil
|
||||
p.dec = nil
|
||||
p.size = nil
|
||||
if len(p.CustomType) > 0 {
|
||||
isMap := typ.Kind() == reflect.Map
|
||||
if len(p.CustomType) > 0 && !isMap {
|
||||
p.setCustomEncAndDec(typ)
|
||||
p.setTag(lockGetProp)
|
||||
return
|
||||
}
|
||||
if p.StdTime && !isMap {
|
||||
p.setTimeEncAndDec(typ)
|
||||
p.setTag(lockGetProp)
|
||||
return
|
||||
}
|
||||
if p.StdDuration && !isMap {
|
||||
p.setDurationEncAndDec(typ)
|
||||
p.setTag(lockGetProp)
|
||||
return
|
||||
}
|
||||
switch t1 := typ; t1.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
@ -542,17 +558,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_byte
|
||||
p.size = size_slice_byte
|
||||
// This is a []byte, which is either a bytes field,
|
||||
// or the value of a map field. In the latter case,
|
||||
// we always encode an empty []byte, so we should not
|
||||
// use the proto3 enc/size funcs.
|
||||
// f == nil iff this is the key/value of a map field.
|
||||
if p.proto3 && f != nil {
|
||||
if p.proto3 {
|
||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||
p.size = size_proto3_slice_byte
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.size = size_slice_byte
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch t2.Bits() {
@ -634,6 +646,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
|
||||
p.mvalprop.CustomType = p.CustomType
|
||||
p.mvalprop.StdDuration = p.StdDuration
|
||||
p.mvalprop.StdTime = p.StdTime
|
||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
p.setTag(lockGetProp)
@ -744,7 +760,9 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
|
||||
reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
|
||||
reflect.PtrTo(t).Implements(extendableBytesType)
|
||||
prop.unrecField = invalidField
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
@ -756,7 +774,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
if f.Name == "XXX_InternalExtensions" { // special case
|
||||
p.enc = (*Buffer).enc_exts
|
||||
p.dec = nil // not needed
|
||||
p.size = size_exts
|
||||
} else if f.Name == "XXX_extensions" { // special case
|
||||
if len(f.Tag.Get("protobuf")) > 0 {
|
||||
p.enc = (*Buffer).enc_ext_slice_byte
|
||||
p.dec = nil // not needed
@ -766,13 +788,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
}
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
} else if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
}
|
||||
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
|
||||
if oneof {
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
isOneofMessage = true
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
@ -783,7 +806,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
}
|
||||
print("\n")
|
||||
}
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
|
||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||
}
|
||||
}
@ -917,7 +940,29 @@ func RegisterType(x Message, name string) {
|
|||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
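MessageName above now prefers an XXX_MessageName method when the message provides one and only then falls back to the registration map. The same optional-interface probe, reduced to a generic sketch with placeholder types:

package main

import "fmt"

// namer is the optional capability we probe for.
type namer interface {
	Name() string
}

// lookupName uses the method if present, otherwise consults a fallback table.
func lookupName(x interface{}, fallback map[interface{}]string) string {
	if n, ok := x.(namer); ok {
		return n.Name()
	}
	return fallback[x]
}

type withName struct{}

func (withName) Name() string { return "example.WithName" }

type plain struct{}

func main() {
	fmt.Println(lookupName(withName{}, nil)) // example.WithName
	fmt.Println(lookupName(plain{}, map[interface{}]string{plain{}: "example.Plain"})) // example.Plain
}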
@ -1,5 +1,7 @@
|
|||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
@ -49,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
|
|||
}
|
||||
}
|
||||
|
||||
func (p *Properties) setDurationEncAndDec(typ reflect.Type) {
|
||||
if p.Repeated {
|
||||
if typ.Elem().Kind() == reflect.Ptr {
|
||||
p.enc = (*Buffer).enc_slice_duration
|
||||
p.dec = (*Buffer).dec_slice_duration
|
||||
p.size = size_slice_duration
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_ref_duration
|
||||
p.dec = (*Buffer).dec_slice_ref_duration
|
||||
p.size = size_slice_ref_duration
|
||||
}
|
||||
} else if typ.Kind() == reflect.Ptr {
|
||||
p.enc = (*Buffer).enc_duration
|
||||
p.dec = (*Buffer).dec_duration
|
||||
p.size = size_duration
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_ref_duration
|
||||
p.dec = (*Buffer).dec_ref_duration
|
||||
p.size = size_ref_duration
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Properties) setTimeEncAndDec(typ reflect.Type) {
|
||||
if p.Repeated {
|
||||
if typ.Elem().Kind() == reflect.Ptr {
|
||||
p.enc = (*Buffer).enc_slice_time
|
||||
p.dec = (*Buffer).dec_slice_time
|
||||
p.size = size_slice_time
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_ref_time
|
||||
p.dec = (*Buffer).dec_slice_ref_time
|
||||
p.size = size_slice_ref_time
|
||||
}
|
||||
} else if typ.Kind() == reflect.Ptr {
|
||||
p.enc = (*Buffer).enc_time
|
||||
p.dec = (*Buffer).dec_time
|
||||
p.size = size_time
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_ref_time
|
||||
p.dec = (*Buffer).dec_ref_time
|
||||
p.size = size_ref_time
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
|
||||
t2 := typ.Elem()
|
||||
p.sstype = typ
@ -1,5 +1,7 @@
|
|||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
@ -1,7 +1,7 @@
|
|||
// Extensions for Protocol Buffers to create more go like structures.
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
@ -50,6 +50,8 @@ import (
|
|||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
@ -159,7 +161,7 @@ func (w *textWriter) indent() { w.ind++ }
|
|||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Printf("proto: textWriter unindented too far")
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
@ -180,7 +182,93 @@ type raw interface {
|
|||
Bytes() []byte
|
||||
}
|
||||
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
@ -233,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
continue
|
||||
}
|
||||
if len(props.Enum) > 0 {
|
||||
if err := writeEnum(w, v, props); err != nil {
|
||||
if err := tm.writeEnum(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeAny(w, v, props); err != nil {
|
||||
} else if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
@ -278,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
@ -295,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
@ -367,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
}
|
||||
|
||||
if len(props.Enum) > 0 {
|
||||
if err := writeEnum(w, fv, props); err != nil {
|
||||
if err := tm.writeEnum(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeAny(w, fv, props); err != nil {
|
||||
} else if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
@ -387,8 +475,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
pv = reflect.New(sv.Type())
|
||||
pv.Elem().Set(sv)
|
||||
}
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
if pv.Type().Implements(extensionRangeType) {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
@ -418,20 +506,45 @@ func writeRaw(w *textWriter, b []byte) error {
|
|||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
if props != nil && len(props.CustomType) > 0 {
|
||||
custom, ok := v.Interface().(Marshaler)
|
||||
if ok {
|
||||
data, err := custom.Marshal()
|
||||
if props != nil {
|
||||
if len(props.CustomType) > 0 {
|
||||
custom, ok := v.Interface().(Marshaler)
|
||||
if ok {
|
||||
data, err := custom.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeString(w, string(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
} else if props.StdTime {
|
||||
t, ok := v.Interface().(time.Time)
|
||||
if !ok {
|
||||
return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
|
||||
}
|
||||
tproto, err := timestampProto(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeString(w, string(data)); err != nil {
|
||||
return err
|
||||
props.StdTime = false
|
||||
err = tm.writeAny(w, reflect.ValueOf(tproto), props)
|
||||
props.StdTime = true
|
||||
return err
|
||||
} else if props.StdDuration {
|
||||
d, ok := v.Interface().(time.Duration)
|
||||
if !ok {
|
||||
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
|
||||
}
|
||||
return nil
|
||||
dproto := durationProto(d)
|
||||
props.StdDuration = false
|
||||
err := tm.writeAny(w, reflect.ValueOf(dproto), props)
|
||||
props.StdDuration = true
|
||||
return err
|
||||
}
|
||||
}
@ -481,15 +594,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
|||
}
|
||||
}
|
||||
w.indent()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
} else if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
@ -633,30 +746,39 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
e := pv.Interface().(Message)
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
var m map[int32]Extension
|
||||
if em, ok := ep.(extensionsMap); ok {
|
||||
m = em.ExtensionMap()
|
||||
} else if em, ok := ep.(extensionsBytes); ok {
|
||||
var mu sync.Locker
|
||||
if em, ok := e.(extensionsBytes); ok {
|
||||
eb := em.GetExtensions()
|
||||
var err error
|
||||
m, err = BytesToExtensionsMap(*eb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mu = notLocker{}
|
||||
} else if _, ok := e.(extendableProto); ok {
|
||||
ep, _ := extendable(e)
|
||||
m, mu = ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
@ -672,20 +794,20 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
|
|||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
pb, err := GetExtension(e, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
@ -694,7 +816,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
@ -703,7 +825,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
@ -730,12 +852,13 @@ func (w *textWriter) writeIndent() {
|
|||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
@ -750,11 +873,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: m.Compact,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
@ -768,7 +891,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
@ -778,9 +901,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (m *TextMarshaler) Text(pb Message) string {
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
m.Marshal(&buf, pb)
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
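With the methods above moved onto *TextMarshaler, the marshaler's configuration reaches every helper, and the new ExpandAny field controls whether google.protobuf.Any values of linked-in types are printed in the expanded [type/url] < ... > form. A hedged usage sketch; msg stands for any concrete generated message, none is defined here:

package main

import (
	"os"

	"github.com/gogo/protobuf/proto"
)

// dumpText writes msg in text format, expanding known Any payloads.
func dumpText(msg proto.Message) error {
	tm := proto.TextMarshaler{Compact: false, ExpandAny: true}
	return tm.Marshal(os.Stdout, msg)
}

func main() {
	// Call dumpText with a concrete generated message in real code.
	_ = dumpText
}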
@ -1,5 +1,7 @@
|
|||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
@ -31,10 +33,10 @@ import (
|
|||
"reflect"
|
||||
)
|
||||
|
||||
func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
m, ok := enumStringMaps[props.Enum]
|
||||
if !ok {
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
}
@ -46,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
|
|||
}
|
||||
s, ok := m[key]
|
||||
if !ok {
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
}
@ -1,7 +1,7 @@
|
|||
// Extensions for Protocol Buffers to create more go like structures.
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||
// http://github.com/gogo/protobuf/gogoproto
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
@ -46,9 +46,13 @@ import (
|
|||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
@ -168,7 +172,7 @@ func (p *textParser) advance() {
|
|||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]".
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension.
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == tok.value {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
@ -519,7 +567,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(extendableProto)
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
sv.Field(oop.Field).Set(nv)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
@ -571,8 +623,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// Technically the "key" and "value" could come in any order,
|
||||
// but in practice they won't.
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
@ -584,32 +637,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
if err := p.consumeToken("key"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken("value"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(terminator); err != nil {
|
||||
return err
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
@ -632,7 +692,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
@ -648,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
@ -708,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
if props.StdTime {
|
||||
fv := v
|
||||
p.back()
|
||||
props.StdTime = false
|
||||
tproto := &timestamp{}
|
||||
err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
|
||||
props.StdTime = true
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tim, err := timestampFromProto(tproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Repeated {
|
||||
t := reflect.TypeOf(v.Interface())
|
||||
if t.Kind() == reflect.Slice {
|
||||
if t.Elem().Kind() == reflect.Ptr {
|
||||
ts := fv.Interface().([]*time.Time)
|
||||
ts = append(ts, &tim)
|
||||
fv.Set(reflect.ValueOf(ts))
|
||||
return nil
|
||||
} else {
|
||||
ts := fv.Interface().([]time.Time)
|
||||
ts = append(ts, tim)
|
||||
fv.Set(reflect.ValueOf(ts))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
|
||||
v.Set(reflect.ValueOf(&tim))
|
||||
} else {
|
||||
v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if props.StdDuration {
|
||||
fv := v
|
||||
p.back()
|
||||
props.StdDuration = false
|
||||
dproto := &duration{}
|
||||
err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
|
||||
props.StdDuration = true
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dur, err := durationFromProto(dproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Repeated {
|
||||
t := reflect.TypeOf(v.Interface())
|
||||
if t.Kind() == reflect.Slice {
|
||||
if t.Elem().Kind() == reflect.Ptr {
|
||||
ds := fv.Interface().([]*time.Duration)
|
||||
ds = append(ds, &dur)
|
||||
fv.Set(reflect.ValueOf(ds))
|
||||
return nil
|
||||
} else {
|
||||
ds := fv.Interface().([]time.Duration)
|
||||
ds = append(ds, dur)
|
||||
fv.Set(reflect.ValueOf(ds))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
|
||||
v.Set(reflect.ValueOf(&dur))
|
||||
} else {
|
||||
v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
@ -750,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// Either "true", "false", 1 or 0.
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1":
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0":
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/timestamp.go (generated, vendored, new file, 113 lines)
|
|
@ -0,0 +1,113 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// This file implements operations on google.protobuf.Timestamp.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range
|
||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||
// in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes
|
||||
// the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||
func validateTimestamp(ts *timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func timestampFromProto(ts *timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func timestampProto(t time.Time) (*timestamp, error) {
|
||||
seconds := t.Unix()
|
||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||
ts := &timestamp{
|
||||
Seconds: seconds,
|
||||
Nanos: nanos,
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
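The new timestamp.go bounds valid Timestamps to [0001-01-01, 10000-01-01) with nanos in [0, 1e9). Because the timestamp struct and its helpers are unexported, the sketch below re-derives the same split/validate/rebuild arithmetic on plain time.Time values rather than calling them:

package main

import (
	"fmt"
	"time"
)

const (
	minValidSeconds = -62135596800 // time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
	maxValidSeconds = 253402300800 // time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
)

// toSecondsNanos splits t into the Timestamp wire fields, mirroring
// timestampProto and validateTimestamp above.
func toSecondsNanos(t time.Time) (int64, int32, error) {
	secs := t.Unix()
	nanos := int32(t.Sub(time.Unix(secs, 0)))
	if secs < minValidSeconds || secs >= maxValidSeconds || nanos < 0 || nanos >= 1e9 {
		return 0, 0, fmt.Errorf("time %v is outside the valid Timestamp range", t)
	}
	return secs, nanos, nil
}

func main() {
	secs, nanos, err := toSecondsNanos(time.Date(2017, 6, 1, 12, 0, 0, 500, time.UTC))
	fmt.Println(secs, nanos, err)

	back := time.Unix(secs, int64(nanos)).UTC() // mirrors timestampFromProto
	fmt.Println(back.Equal(time.Date(2017, 6, 1, 12, 0, 0, 500, time.UTC))) // true
}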
cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go (generated, vendored, new file, 229 lines)
|
|
@ -0,0 +1,229 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
|
||||
type timestamp struct {
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *timestamp) Reset() { *m = timestamp{} }
|
||||
func (*timestamp) ProtoMessage() {}
|
||||
func (*timestamp) String() string { return "timestamp<string>" }
|
||||
|
||||
func init() {
|
||||
RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
|
||||
}
|
||||
|
||||
func (o *Buffer) decTimestamp() (time.Time, error) {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
tproto := &timestamp{}
|
||||
if err := Unmarshal(b, tproto); err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return timestampFromProto(tproto)
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setPtrCustomType(base, p.field, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setCustomType(base, p.field, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
|
||||
var zero field
|
||||
setPtrCustomType(newBas, zero, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
|
||||
var zero field
|
||||
setCustomType(newBas, zero, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_time(p *Properties, base structPointer) (n int) {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return 0
|
||||
}
|
||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(t)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_time(p *Properties, base structPointer) error {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return ErrNil
|
||||
}
|
||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_ref_time(p *Properties, base structPointer) (n int) {
|
||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(t)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
|
||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_time(p *Properties, base structPointer) (n int) {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
if tims[i] == nil {
|
||||
return 0
|
||||
}
|
||||
tproto, err := timestampProto(*tims[i])
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(tproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
if tims[i] == nil {
|
||||
return errRepeatedHasNil
|
||||
}
|
||||
tproto, err := timestampProto(*tims[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(tproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_ref_time(p *Properties, base structPointer) (n int) {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
tproto, err := timestampProto(tims[i])
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(tproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
tproto, err := timestampProto(tims[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(tproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
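For reference, the dec_*/enc_* helpers above round-trip a time.Time through a (Seconds, Nanos) pair, which is the split timestampProto and timestampFromProto perform. A minimal standalone sketch of that conversion using only the standard library (the helper names below are illustrative, not from the vendored code):

package main

import (
	"fmt"
	"time"
)

// toSecondsNanos mirrors the split done by timestampProto: whole seconds
// since the Unix epoch plus the remaining sub-second nanoseconds.
func toSecondsNanos(t time.Time) (int64, int32) {
	seconds := t.Unix()
	nanos := int32(t.Sub(time.Unix(seconds, 0)))
	return seconds, nanos
}

// fromSecondsNanos is the inverse, analogous to timestampFromProto.
func fromSecondsNanos(seconds int64, nanos int32) time.Time {
	return time.Unix(seconds, int64(nanos)).UTC()
}

func main() {
	t := time.Date(2017, time.June, 1, 12, 30, 0, 500, time.UTC)
	s, n := toSecondsNanos(t)
	fmt.Println(s, n)                   // seconds since the epoch, and 500 nanos
	fmt.Println(fromSecondsNanos(s, n)) // round-trips to the same instant
}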
@@ -1,4 +1,6 @@
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	// x, n already 0
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
@@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
	return 0, 0
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	// x, err already 0

func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	i := p.index
	l := len(p.buf)
@@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
	return
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		return p.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	return 0, errOverflow

done:
	p.index = i
	return x, nil
}

// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
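The unrolled fast path above relies on the varint wire format: each byte carries 7 bits of the value, least-significant group first, and the high bit marks that another byte follows. The standard library implements the same unsigned varint encoding, so a short, purely illustrative sketch of the format looks like this:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 = 0b1_0010_1100: the low 7 bits (0x2c) go first with the
	// continuation bit set (0xac), then the remaining bits (0x02).
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	x, m := binary.Uvarint(buf[:n])
	fmt.Println(x, m) // 300 2
}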
@@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
	// If the object can unmarshal itself, let it.
	if u, ok := pb.(Unmarshaler); ok {
@@ -378,6 +474,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
		wire := int(u & 0x7)
		if wire == WireEndGroup {
			if is_group {
				if required > 0 {
					// Not enough information to determine the exact field.
					// (See below.)
					return &RequiredNotSetError{"{Unknown}"}
				}
				return nil // input is satisfied
			}
			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
	}
	p := NewBuffer(nil)
	err := p.Marshal(pb)
	var state errorState
	if err != nil && !state.shouldContinue(err, nil) {
		return nil, err
	}
	if p.buf == nil && err == nil {
		// Return a non-nil slice on success.
		return []byte{}, nil
@@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
	// Can the object marshal itself?
	if m, ok := pb.(Marshaler); ok {
		data, err := m.Marshal()
		if err != nil {
			return err
		}
		p.buf = append(p.buf, data...)
		return nil
		return err
	}

	t, base, err := getbase(pb)
@@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error {
	}

	if collectStats {
		stats.Encode++
		(stats).Encode++ // Parens are to work around a goimports bug.
	}

	if len(p.buf) > maxMarshalSize {
@@ -309,7 +302,7 @@ func Size(pb Message) (n int) {
	}

	if collectStats {
		stats.Size++
		(stats).Size++ // Parens are to work around a goimports bug.
	}

	return
@@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
		if p.isMarshaler {
			m := structPointer_Interface(structp, p.stype).(Marshaler)
			data, _ := m.Marshal()
			n += len(p.tagcode)
			n += sizeRawBytes(data)
			continue
		}
@@ -54,13 +54,17 @@ Equality is defined in this way:
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal (a "bytes" field,
    although represented by []byte, is not a repeated field)
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
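The nil == {} rule spelled out above exists because Go itself distinguishes a nil byte slice from an empty one; proto equality deliberately does not. A small standard-library illustration (not part of the vendored code):

package main

import (
	"bytes"
	"fmt"
	"reflect"
)

func main() {
	var unset []byte  // a proto3 "bytes" field that was never set
	empty := []byte{} // the same field explicitly set to zero length

	// Structural comparison tells the two apart...
	fmt.Println(reflect.DeepEqual(unset, empty)) // false
	// ...while byte-wise comparison, like the documented rule, does not.
	fmt.Println(bytes.Equal(unset, empty)) // true
}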
@@ -489,6 +489,37 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
	return
}

// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
	epb, ok := extendable(pb)
	if !ok {
		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
	}
	registeredExtensions := RegisteredExtensions(pb)

	emap, mu := epb.extensionsRead()
	if emap == nil {
		return nil, nil
	}
	mu.Lock()
	defer mu.Unlock()
	extensions := make([]*ExtensionDesc, 0, len(emap))
	for extid, e := range emap {
		desc := e.desc
		if desc == nil {
			desc = registeredExtensions[extid]
			if desc == nil {
				desc = &ExtensionDesc{Field: extid}
			}
		}

		extensions = append(extensions, desc)
	}
	return extensions, nil
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
	epb, ok := extendable(pb)
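ExtensionDescs resolves each descriptor with a three-step fallback: the descriptor cached alongside the extension data, then the registered descriptor, then a stub carrying only the field number. A minimal sketch of that fallback with stand-in types (descriptor and the registry map below are illustrative, not the package's real types):

package main

import "fmt"

// descriptor stands in for *ExtensionDesc; only the field number matters here.
type descriptor struct{ Field int32 }

// registered plays the role of RegisteredExtensions(pb).
var registered = map[int32]*descriptor{100: {Field: 100}}

// resolve mirrors the fallback chain inside ExtensionDescs.
func resolve(field int32, cached *descriptor) *descriptor {
	if cached != nil {
		return cached // descriptor stored with the extension value
	}
	if d, ok := registered[field]; ok {
		return d // known, registered extension
	}
	return &descriptor{Field: field} // incomplete stub, field number only
}

func main() {
	fmt.Println(resolve(100, nil).Field) // 100: found in the registry
	fmt.Println(resolve(999, nil).Field) // 999: stub for an unregistered extension
}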
@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
// temporary Buffer and are fine for most applications.
type Buffer struct {
	buf   []byte // encode/decode byte stream
	index int    // write point
	index int    // read point

	// pools of basic types to amortize allocation.
	bools []bool
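The comment fix above reflects that Buffer.index is the read position while decoding. Assuming the package is imported at its upstream path github.com/gogo/protobuf/proto (here it lives under vendor/), a small usage sketch of the Buffer API shown in this diff:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Write two varints, then read them back; DecodeVarint advances
	// the buffer's index (the read point) past each value.
	w := proto.NewBuffer(nil)
	_ = w.EncodeVarint(1)
	_ = w.EncodeVarint(300)

	r := proto.NewBuffer(w.Bytes())
	x, _ := r.DecodeVarint()
	y, _ := r.DecodeVarint()
	fmt.Println(x, y) // 1 300
}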
@@ -844,7 +844,15 @@ func RegisterType(x Message, name string) {
}

// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
func MessageName(x Message) string {
	type xname interface {
		XXX_MessageName() string
	}
	if m, ok := x.(xname); ok {
		return m.XXX_MessageName()
	}
	return revProtoTypes[reflect.TypeOf(x)]
}

// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
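The new MessageName body uses the optional-interface pattern: assert the value against a locally declared interface and fall back to the registry when the method is absent. A standalone sketch of that pattern with illustrative types of my own (only the method name XXX_MessageName comes from the diff):

package main

import "fmt"

// namer mirrors the shape of the xname interface declared inside MessageName.
type namer interface {
	XXX_MessageName() string
}

type selfNaming struct{}

func (selfNaming) XXX_MessageName() string { return "example.SelfNaming" }

type plain struct{}

// nameOf prefers the value's own answer and otherwise falls back,
// just as MessageName falls back to the revProtoTypes registry.
func nameOf(x interface{}) string {
	if m, ok := x.(namer); ok {
		return m.XXX_MessageName()
	}
	return "<looked up in a registry>"
}

func main() {
	fmt.Println(nameOf(selfNaming{})) // example.SelfNaming
	fmt.Println(nameOf(plain{}))      // <looked up in a registry>
}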
Some files were not shown because too many files have changed in this diff.