Merge pull request #3578 from jwcesign/metrics-adapter-v4
feat: add metrics-adapter component to support centralized hpa
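The adapter registers a metrics.k8s.io APIService against the Karmada apiserver and aggregates pod/node metrics from every ready member cluster, which is what allows an HPA controller to run centrally against the Karmada control plane. As a rough illustration (not part of this change), a client could read the aggregated metrics with the standard metrics clientset; the kubeconfig path below is only an assumption:

// Sketch only: reads the aggregated pod metrics served by karmada-metrics-adapter
// through the Karmada apiserver. The kubeconfig path is an assumed example.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	// Assumed path to a kubeconfig pointing at the Karmada control plane.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/karmada/karmada-apiserver.config")
	if err != nil {
		panic(err)
	}
	client := metricsclient.NewForConfigOrDie(cfg)

	// Pod metrics are merged from all ready member clusters; each entry carries its
	// source cluster in the resource.karmada.io/query-from-cluster annotation.
	podMetrics, err := client.MetricsV1beta1().PodMetricses("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, m := range podMetrics.Items {
		fmt.Println(m.Name, m.Annotations["resource.karmada.io/query-from-cluster"])
	}
}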
This commit is contained in:
commit 42355694ae

Makefile
|
@@ -17,7 +17,8 @@ TARGETS := karmada-aggregated-apiserver \
 	karmada-scheduler-estimator \
 	karmada-interpreter-webhook-example \
 	karmada-search \
-	karmada-operator
+	karmada-operator \
+	karmada-metrics-adapter

 CTL_TARGETS := karmadactl kubectl-karmada

@@ -122,6 +123,7 @@ endif
 	docker push ${REGISTRY}/karmada-aggregated-apiserver:${VERSION}
 	docker push ${REGISTRY}/karmada-search:${VERSION}
 	docker push ${REGISTRY}/karmada-operator:${VERSION}
+	docker push ${REGISTRY}/karmada-metrics-adapter:${VERSION}

 # Build and package binary
 #
|
|
@ -10,7 +10,7 @@
|
|||
"version": "unversioned"
|
||||
},
|
||||
"paths": {
|
||||
"/apis/": {
|
||||
"/apis": {
|
||||
"get": {
|
||||
"description": "get available API versions",
|
||||
"consumes": [
|
||||
|
@ -40,7 +40,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/cluster.karmada.io/": {
|
||||
"/apis/cluster.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -70,7 +70,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/cluster.karmada.io/v1alpha1/": {
|
||||
"/apis/cluster.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -1564,7 +1564,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/config.karmada.io/": {
|
||||
"/apis/config.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -1594,7 +1594,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/config.karmada.io/v1alpha1/": {
|
||||
"/apis/config.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -2624,7 +2624,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/networking.karmada.io/": {
|
||||
"/apis/networking.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -2654,7 +2654,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/networking.karmada.io/v1alpha1/": {
|
||||
"/apis/networking.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -3940,7 +3940,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/policy.karmada.io/": {
|
||||
"/apis/policy.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -3970,7 +3970,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/policy.karmada.io/v1alpha1/": {
|
||||
"/apis/policy.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -9768,7 +9768,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/search.karmada.io/": {
|
||||
"/apis/search.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -9798,7 +9798,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/search.karmada.io/v1alpha1/": {
|
||||
"/apis/search.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -10828,7 +10828,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/work.karmada.io/": {
|
||||
"/apis/work.karmada.io": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
"consumes": [
|
||||
|
@ -10858,7 +10858,7 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"/apis/work.karmada.io/v1alpha1/": {
|
||||
"/apis/work.karmada.io/v1alpha1": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -12144,7 +12144,7 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
"/apis/work.karmada.io/v1alpha2/": {
|
||||
"/apis/work.karmada.io/v1alpha2": {
|
||||
"get": {
|
||||
"description": "get available resources",
|
||||
"consumes": [
|
||||
|
@ -17081,8 +17081,13 @@
|
|||
"type": "string"
|
||||
},
|
||||
"pathType": {
|
||||
"description": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.",
|
||||
"type": "string"
|
||||
"description": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.\n\nPossible enum values:\n - `\"Exact\"` matches the URL path exactly and with case sensitivity.\n - `\"ImplementationSpecific\"` matching is up to the IngressClass. Implementations can treat this as a separate PathType or treat it identically to Prefix or Exact path types.\n - `\"Prefix\"` matches based on a URL path prefix split by '/'. Matching is case sensitive and done on a path element by element basis. A path element refers to the list of labels in the path split by the '/' separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. Note that if the last element of the path is a substring of the last element in request path, it is not a match (e.g. /foo/bar matches /foo/bar/baz, but does not match /foo/barbaz). If multiple matching paths exist in an Ingress spec, the longest matching path is given priority. Examples: - /foo/bar does not match requests to /foo/barbaz - /foo/bar matches request to /foo/bar and /foo/bar/baz - /foo and /foo/ both match requests to /foo and /foo/. If both paths are present in an Ingress spec, the longest matching path (/foo/) is given priority.",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Exact",
|
||||
"ImplementationSpecific",
|
||||
"Prefix"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
@@ -0,0 +1,25 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
  labels:
    app: karmada-metrics-adapter
    apiserver: "true"
spec:
  insecureSkipTLSVerify: true
  group: metrics.k8s.io
  groupPriorityMinimum: 2000
  service:
    name: karmada-metrics-adapter
    namespace: karmada-system
  version: v1beta1
  versionPriority: 10
---
apiVersion: v1
kind: Service
metadata:
  name: karmada-metrics-adapter
  namespace: karmada-system
spec:
  type: ExternalName
  externalName: karmada-metrics-adapter.karmada-system.svc.cluster.local
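As a quick sanity check (again only a sketch, not part of this change), the aggregated group registered by the APIService above can be probed with the discovery client; the kubeconfig path is an assumption:

// Sketch only: confirms the Karmada apiserver serves metrics.k8s.io/v1beta1
// once v1beta1.metrics.k8s.io is registered and the adapter is reachable.
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed path to a kubeconfig pointing at the Karmada control plane.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/karmada/karmada-apiserver.config")
	if err != nil {
		panic(err)
	}
	dc := discovery.NewDiscoveryClientForConfigOrDie(cfg)

	// Succeeds only when the aggregated API behind the APIService is being served.
	resources, err := dc.ServerResourcesForGroupVersion("metrics.k8s.io/v1beta1")
	if err != nil {
		panic(err)
	}
	for _, r := range resources.APIResources {
		fmt.Println(r.Name) // expected to include "pods" and "nodes"
	}
}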
@@ -0,0 +1,86 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: karmada-metrics-adapter
  namespace: karmada-system
  labels:
    app: karmada-metrics-adapter
    apiserver: "true"
spec:
  selector:
    matchLabels:
      app: karmada-metrics-adapter
      apiserver: "true"
  replicas: 1
  template:
    metadata:
      labels:
        app: karmada-metrics-adapter
        apiserver: "true"
    spec:
      automountServiceAccountToken: false
      containers:
        - name: karmada-metrics-adapter
          image: docker.io/karmada/karmada-metrics-adapter:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: karmada-certs
              mountPath: /etc/karmada/pki
              readOnly: true
            - name: kubeconfig
              subPath: kubeconfig
              mountPath: /etc/kubeconfig
          command:
            - /bin/karmada-metrics-adapter
            - --kubeconfig=/etc/kubeconfig
            - --authentication-kubeconfig=/etc/kubeconfig
            - --authorization-kubeconfig=/etc/kubeconfig
            - --client-ca-file=/etc/karmada/pki/ca.crt
            - --audit-log-path=-
            - --audit-log-maxage=0
            - --audit-log-maxbackup=0
          readinessProbe:
            httpGet:
              path: /readyz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 1
            failureThreshold: 3
            periodSeconds: 3
            timeoutSeconds: 15
          livenessProbe:
            httpGet:
              path: /healthz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 10
            failureThreshold: 3
            periodSeconds: 10
            timeoutSeconds: 15
          resources:
            requests:
              cpu: 100m
      volumes:
        - name: karmada-certs
          secret:
            secretName: karmada-cert-secret
        - name: kubeconfig
          secret:
            secretName: kubeconfig
---
apiVersion: v1
kind: Service
metadata:
  name: karmada-metrics-adapter
  namespace: karmada-system
  labels:
    app: karmada-metrics-adapter
    apiserver: "true"
spec:
  ports:
    - port: 443
      protocol: TCP
      targetPort: 443
  selector:
    app: karmada-metrics-adapter
@@ -0,0 +1,62 @@
package app

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/term"

	"github.com/karmada-io/karmada/cmd/metrics-adapter/app/options"
	"github.com/karmada-io/karmada/pkg/sharedcli"
	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
	"github.com/karmada-io/karmada/pkg/version/sharedcommand"
)

// NewMetricsAdapterCommand creates a *cobra.Command object with default parameters
func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
	opts := options.NewOptions()

	cmd := &cobra.Command{
		Use:  "karmada-metrics-adapter",
		Long: `The karmada-metrics-adapter is an adapter that aggregates the metrics from member clusters.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if err := opts.Complete(); err != nil {
				return err
			}
			if err := opts.Validate(); err != nil {
				return err
			}
			if err := opts.Run(ctx); err != nil {
				return err
			}
			return nil
		},
		Args: func(cmd *cobra.Command, args []string) error {
			for _, arg := range args {
				if len(arg) > 0 {
					return fmt.Errorf("%q does not take any arguments, got %q", cmd.CommandPath(), args)
				}
			}
			return nil
		},
	}

	fss := cliflag.NamedFlagSets{}

	genericFlagSet := fss.FlagSet("generic")
	opts.AddFlags(genericFlagSet)

	// Set klog flags
	logsFlagSet := fss.FlagSet("logs")
	klogflag.Add(logsFlagSet)

	cmd.AddCommand(sharedcommand.NewCmdVersion("karmada-metrics-adapter"))
	cmd.Flags().AddFlagSet(genericFlagSet)
	cmd.Flags().AddFlagSet(logsFlagSet)

	cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
	sharedcli.SetUsageAndHelpFunc(cmd, fss, cols)
	return cmd
}
@@ -0,0 +1,99 @@
package options

import (
	"context"

	"github.com/spf13/pflag"
	openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
	"sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options"
	"sigs.k8s.io/metrics-server/pkg/api"

	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
	informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
	generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi"
	"github.com/karmada-io/karmada/pkg/metricsadapter"
	"github.com/karmada-io/karmada/pkg/version"
)

// Options contains everything necessary to create and run metrics-adapter.
type Options struct {
	CustomMetricsAdapterServerOptions *options.CustomMetricsAdapterServerOptions

	KubeConfig string
}

// NewOptions builds a default metrics-adapter options.
func NewOptions() *Options {
	o := &Options{
		CustomMetricsAdapterServerOptions: options.NewCustomMetricsAdapterServerOptions(),
	}

	return o
}

// Complete fills in fields required to have valid data.
func (o *Options) Complete() error {
	return nil
}

// AddFlags adds flags to the specified FlagSet.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
	o.CustomMetricsAdapterServerOptions.AddFlags(fs)

	fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to karmada control plane kubeconfig file.")
}

// Config returns config for the metrics-adapter server given Options
func (o *Options) Config() (*metricsadapter.MetricsServer, error) {
	restConfig, err := clientcmd.BuildConfigFromFlags("", o.KubeConfig)
	if err != nil {
		klog.Errorf("Unable to build restConfig: %v", err)
		return nil, err
	}

	karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
	factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)

	metricsController := metricsadapter.NewMetricsController(restConfig, factory)
	metricsAdapter := metricsadapter.NewMetricsAdapter(metricsController, o.CustomMetricsAdapterServerOptions)
	metricsAdapter.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme))
	metricsAdapter.OpenAPIConfig.Info.Title = "karmada-metrics-adapter"
	metricsAdapter.OpenAPIConfig.Info.Version = "1.0.0"

	server, err := metricsAdapter.Server()
	if err != nil {
		klog.Errorf("Unable to construct metrics adapter: %v", err)
		return nil, err
	}

	err = server.GenericAPIServer.AddPostStartHook("start-karmada-informers", func(context genericapiserver.PostStartHookContext) error {
		factory.Start(context.StopCh)
		return nil
	})
	if err != nil {
		klog.Errorf("Unable to add post hook: %v", err)
		return nil, err
	}

	if err := api.Install(metricsAdapter, metricsAdapter.PodLister, metricsAdapter.NodeLister, server.GenericAPIServer, nil); err != nil {
		klog.Errorf("Unable to install resource metrics adapter: %v", err)
		return nil, err
	}

	return metricsadapter.NewMetricsServer(metricsController, metricsAdapter), nil
}

// Run runs the metrics-adapter with options. This should never exit.
func (o *Options) Run(ctx context.Context) error {
	klog.Infof("karmada-metrics-adapter version: %s", version.Get())

	metricsServer, err := o.Config()
	if err != nil {
		return err
	}

	return metricsServer.StartServer(ctx.Done())
}
@@ -0,0 +1,14 @@
package options

import (
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// Validate checks Options and returns an aggregate of any errors found.
func (o *Options) Validate() error {
	var errs []error

	errs = append(errs, o.CustomMetricsAdapterServerOptions.Validate()...)

	return utilerrors.NewAggregate(errs)
}
@@ -0,0 +1,18 @@
package main

import (
	"os"

	"k8s.io/component-base/cli"
	_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
	controllerruntime "sigs.k8s.io/controller-runtime"

	"github.com/karmada-io/karmada/cmd/metrics-adapter/app"
)

func main() {
	ctx := controllerruntime.SetupSignalHandler()
	cmd := app.NewMetricsAdapterCommand(ctx)
	code := cli.Run(cmd)
	os.Exit(code)
}
go.mod
@ -17,7 +17,7 @@ require (
|
|||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/vektra/mockery/v2 v2.10.0
|
||||
github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64
|
||||
go.uber.org/atomic v1.9.0
|
||||
|
@ -38,16 +38,19 @@ require (
|
|||
k8s.io/code-generator v0.26.2
|
||||
k8s.io/component-base v0.26.2
|
||||
k8s.io/component-helpers v0.26.2
|
||||
k8s.io/klog/v2 v2.80.1
|
||||
k8s.io/klog/v2 v2.90.1
|
||||
k8s.io/kube-aggregator v0.26.2
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280
|
||||
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d
|
||||
k8s.io/kubectl v0.26.2
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
|
||||
k8s.io/metrics v0.26.2
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
|
||||
layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf
|
||||
sigs.k8s.io/cluster-api v1.4.0
|
||||
sigs.k8s.io/controller-runtime v0.14.5
|
||||
sigs.k8s.io/custom-metrics-apiserver v1.25.1-0.20230308103314-bd3192a29bc8
|
||||
sigs.k8s.io/kind v0.17.0
|
||||
sigs.k8s.io/mcs-api v0.1.0
|
||||
sigs.k8s.io/metrics-server v0.6.1-0.20230509102056-1a23b5bd2e12
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
@ -69,7 +72,7 @@ require (
|
|||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/camelcase v1.0.0 // indirect
|
||||
|
@ -80,8 +83,8 @@ require (
|
|||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/gobuffalo/flect v1.0.2 // indirect
|
||||
|
@ -175,7 +178,7 @@ require (
|
|||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
|
||||
k8s.io/kms v0.26.2 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.12.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
|
||||
)
|
||||
|
|
go.sum
@ -199,8 +199,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7fo
|
|||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
|
@ -283,15 +283,15 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
|
|||
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
|
||||
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
|
||||
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
|
||||
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
|
@ -533,6 +533,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
|||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
|
@ -779,8 +780,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
|
@ -1400,6 +1402,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
@ -1487,22 +1490,24 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
|||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kms v0.26.2 h1:GM1gg3tFK3OUU/QQFi93yGjG3lJT8s8l3Wkn2+VxBLM=
|
||||
k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
|
||||
k8s.io/kube-aggregator v0.26.2 h1:WtcLGisa5aCKBbBI1/Xe7gdjPlVb5Xhvs4a8Rdk8EXs=
|
||||
k8s.io/kube-aggregator v0.26.2/go.mod h1:swDTw0k/XghVLR+PCWnP6Y36wR2+DsqL2HUVq8eu0RI=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
|
||||
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU=
|
||||
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
|
||||
k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4=
|
||||
k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM=
|
||||
k8s.io/metrics v0.26.2 h1:2gUvUWWnHPdE2tyA5DvyHC8HGryr+izhY9i5dzLP06s=
|
||||
k8s.io/metrics v0.26.2/go.mod h1:PX1wm9REV9hSGuw9GcXTFNDgab1KRXck3mNeiLYbRho=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf h1:rRz0YsF7VXj9fXRF6yQgFI7DzST+hsI3TeFSGupntu0=
|
||||
layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf/go.mod h1:ivKkcY8Zxw5ba0jldhZCYYQfGdb2K6u9tbYK1AwMIBc=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
@ -1517,8 +1522,10 @@ sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gE
|
|||
sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s=
|
||||
sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/custom-metrics-apiserver v1.25.1-0.20230308103314-bd3192a29bc8 h1:0dZXAPEWoIAJ3KtBHKAViBSwE1yMRH0PI/UdYb4bIhE=
|
||||
sigs.k8s.io/custom-metrics-apiserver v1.25.1-0.20230308103314-bd3192a29bc8/go.mod h1:9nUXR/EgdYZto1aQ6yhwOksPR7J979jSyOqic1IgaOo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=
|
||||
sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM=
|
||||
sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk=
|
||||
|
@ -1528,6 +1535,8 @@ sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2
|
|||
sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
|
||||
sigs.k8s.io/mcs-api v0.1.0 h1:edDbg0oRGfXw8TmZjKYep06LcJLv/qcYLidejnUp0PM=
|
||||
sigs.k8s.io/mcs-api v0.1.0/go.mod h1:gGiAryeFNB4GBsq2LBmVqSgKoobLxt+p7ii/WG5QYYw=
|
||||
sigs.k8s.io/metrics-server v0.6.1-0.20230509102056-1a23b5bd2e12 h1:M2W5nfr7ATwuEaFjH6qH0GeK2M1LoIw78kRvVxhEX+E=
|
||||
sigs.k8s.io/metrics-server v0.6.1-0.20230509102056-1a23b5bd2e12/go.mod h1:PuZhxxSf39LvZ27uzIaiUoGwaz/RoZzmXdQbi252D5Y=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
|
|
|
@@ -213,6 +213,7 @@ openapi-gen \
   --input-dirs "k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \
   --input-dirs "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,k8s.io/api/admissionregistration/v1,k8s.io/api/networking/v1" \
   --input-dirs "github.com/karmada-io/karmada/pkg/apis/search/v1alpha1" \
+  --input-dirs "k8s.io/metrics/pkg/apis/custom_metrics,k8s.io/metrics/pkg/apis/custom_metrics/v1beta1,k8s.io/metrics/pkg/apis/custom_metrics/v1beta2,k8s.io/metrics/pkg/apis/external_metrics,k8s.io/metrics/pkg/apis/external_metrics/v1beta1,k8s.io/metrics/pkg/apis/metrics,k8s.io/metrics/pkg/apis/metrics/v1beta1" \
   --output-package "github.com/karmada-io/karmada/pkg/generated/openapi" \
   -O zz_generated.openapi

@@ -19,6 +19,7 @@ INTERPRETER_WEBHOOK_EXAMPLE_LABEL="karmada-interpreter-webhook-example"
 KARMADA_SEARCH_LABEL="karmada-search"
 KARMADA_OPENSEARCH_LABEL="karmada-opensearch"
 KARMADA_OPENSEARCH_DASHBOARDS_LABEL="karmada-opensearch-dashboards"
+KARMADA_METRICS_ADAPTER_LABEL="karmada-metrics-adapter"

 KARMADA_GO_PACKAGE="github.com/karmada-io/karmada"

@@ -37,6 +38,7 @@ KARMADA_TARGET_SOURCE=(
   karmada-interpreter-webhook-example=examples/customresourceinterpreter/webhook
   karmada-search=cmd/karmada-search
   karmada-operator=operator/cmd/operator
+  karmada-metrics-adapter=cmd/metrics-adapter
 )

 #https://textkool.com/en/ascii-art-generator?hl=default&vl=default&font=DOS%20Rebel&text=KARMADA

(File diff suppressed because it is too large.)
@@ -0,0 +1,29 @@
package metricsadapter

import (
	basecmd "sigs.k8s.io/custom-metrics-apiserver/pkg/cmd"
	"sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options"

	"github.com/karmada-io/karmada/pkg/metricsadapter/provider"
)

// MetricsAdapter is a metrics adapter to provide native metrics, custom metrics and external metrics
type MetricsAdapter struct {
	basecmd.AdapterBase

	*provider.ResourceMetricsProvider
}

// NewMetricsAdapter creates a new metrics adapter
func NewMetricsAdapter(controller *MetricsController, customMetricsAdapterServerOptions *options.CustomMetricsAdapterServerOptions) *MetricsAdapter {
	adapter := &MetricsAdapter{}
	adapter.CustomMetricsAdapterServerOptions = customMetricsAdapterServerOptions

	adapter.ResourceMetricsProvider = provider.NewResourceMetricsProvider(controller.ClusterLister, controller.InformerManager)
	customProvider := provider.MakeCustomMetricsProvider()
	externalProvider := provider.MakeExternalMetricsProvider()
	adapter.WithCustomMetrics(customProvider)
	adapter.WithExternalMetrics(externalProvider)

	return adapter
}
@@ -0,0 +1,21 @@
package metricsadapter

// MetricsServer is a metrics server
type MetricsServer struct {
	metricsController *MetricsController
	metricsAdapter    *MetricsAdapter
}

// NewMetricsServer creates a new metrics server
func NewMetricsServer(controller *MetricsController, metricsAdapter *MetricsAdapter) *MetricsServer {
	return &MetricsServer{
		metricsController: controller,
		metricsAdapter:    metricsAdapter,
	}
}

// StartServer starts the metrics server
func (m *MetricsServer) StartServer(stopCh <-chan struct{}) error {
	go m.metricsController.startController(stopCh)
	return m.metricsAdapter.Run(stopCh)
}
@@ -0,0 +1,158 @@
package metricsadapter

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"

	clusterV1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
	clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1"
	"github.com/karmada-io/karmada/pkg/metricsadapter/provider"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
	"github.com/karmada-io/karmada/pkg/util/gclient"
)

// MetricsController is a controller for metrics; it controls the lifecycle of the member-cluster informers
type MetricsController struct {
	InformerFactory informerfactory.SharedInformerFactory
	ClusterLister   clusterlister.ClusterLister
	InformerManager genericmanager.MultiClusterInformerManager

	queue      workqueue.RateLimitingInterface
	restConfig *rest.Config
}

// NewMetricsController creates a new metrics controller
func NewMetricsController(restConfig *rest.Config, factory informerfactory.SharedInformerFactory) *MetricsController {
	clusterLister := factory.Cluster().V1alpha1().Clusters().Lister()
	controller := &MetricsController{
		InformerFactory: factory,
		ClusterLister:   clusterLister,
		InformerManager: genericmanager.GetInstance(),
		restConfig:      restConfig,
		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "metrics-adapter"),
	}
	controller.addEventHandler()

	return controller
}

// addEventHandler adds event handler for cluster
func (m *MetricsController) addEventHandler() {
	clusterInformer := m.InformerFactory.Cluster().V1alpha1().Clusters().Informer()
	_, err := clusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: m.addCluster,
		// Update event and delete event will be handled by the same handler
		UpdateFunc: m.updateCluster,
	})
	if err != nil {
		klog.Errorf("Failed to add cluster event handler for cluster: %v", err)
	}
}

// addCluster adds cluster to queue
func (m *MetricsController) addCluster(obj interface{}) {
	cluster := obj.(*clusterV1alpha1.Cluster)
	m.queue.Add(cluster.GetName())
}

// updateCluster updates cluster in queue
func (m *MetricsController) updateCluster(oldObj, curObj interface{}) {
	curCluster := curObj.(*clusterV1alpha1.Cluster)
	oldCluster := oldObj.(*clusterV1alpha1.Cluster)
	if curCluster.ResourceVersion == oldCluster.ResourceVersion {
		// no change, do nothing.
		return
	}

	if oldCluster.DeletionTimestamp.IsZero() != curCluster.DeletionTimestamp.IsZero() {
		// cluster is being deleted.
		m.queue.Add(curCluster.GetName())
	}

	if util.ClusterAccessCredentialChanged(curCluster.Spec, oldCluster.Spec) ||
		util.IsClusterReady(&curCluster.Status) != util.IsClusterReady(&oldCluster.Status) {
		// Cluster.Spec or Cluster health state is changed, rebuild informer.
		m.InformerManager.Stop(curCluster.GetName())
		m.queue.Add(curCluster.GetName())
	}
}

// startController starts controller
func (m *MetricsController) startController(stopCh <-chan struct{}) {
	m.InformerFactory.WaitForCacheSync(stopCh)

	go wait.Until(m.worker, time.Second, stopCh)

	go func() {
		<-stopCh
		genericmanager.StopInstance()
		klog.Infof("Shutting down karmada-metrics-adapter")
	}()
}

// worker is a worker that handles the items in the queue
func (m *MetricsController) worker() {
	for m.handleClusters() {
	}
}

// handleClusters handles cluster changes
func (m *MetricsController) handleClusters() bool {
	key, shutdown := m.queue.Get()
	if shutdown {
		klog.Errorf("Fail to pop item from queue")
		return false
	}
	defer m.queue.Done(key)

	clusterName := key.(string)
	cls, err := m.ClusterLister.Get(clusterName)
	if err != nil {
		if apierrors.IsNotFound(err) {
			klog.Infof("try to stop cluster informer %s", clusterName)
			m.InformerManager.Stop(clusterName)
			return true
		}
		return false
	}

	if !cls.DeletionTimestamp.IsZero() {
		klog.Infof("try to stop cluster informer %s", clusterName)
		m.InformerManager.Stop(clusterName)
		return true
	}

	if !util.IsClusterReady(&cls.Status) {
		klog.Warningf("cluster %s is notReady try to stop this cluster informer", clusterName)
		m.InformerManager.Stop(clusterName)
		return false
	}

	if !m.InformerManager.IsManagerExist(clusterName) {
		klog.Info("Try to build informer manager for cluster ", clusterName)
		controlPlaneClient := gclient.NewForConfigOrDie(m.restConfig)
		clusterDynamicClient, err := util.NewClusterDynamicClientSet(clusterName, controlPlaneClient)
		if err != nil {
			return false
		}
		_ = m.InformerManager.ForCluster(clusterName, clusterDynamicClient.DynamicClientSet, 0)
	}
	sci := m.InformerManager.GetSingleClusterManager(clusterName)

	// Just trigger the informer to work
	_ = sci.Lister(provider.PodsGVR)
	_ = sci.Lister(provider.NodesGVR)

	sci.Start()
	_ = sci.WaitForCacheSync()

	return true
}
@@ -0,0 +1,35 @@
package provider

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/metrics/pkg/apis/custom_metrics"
	"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
)

// CustomMetricsProvider is a custom metrics provider
type CustomMetricsProvider struct {
}

// MakeCustomMetricsProvider creates a new custom metrics provider
func MakeCustomMetricsProvider() *CustomMetricsProvider {
	return &CustomMetricsProvider{}
}

// GetMetricByName will query metrics by name from member clusters and return the result
func (c *CustomMetricsProvider) GetMetricByName(ctx context.Context, name types.NamespacedName, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValue, error) {
	return nil, fmt.Errorf("karmada-metrics-adapter does not implement custom metrics yet")
}

// GetMetricBySelector will query metrics by selector from member clusters and return the result
func (c *CustomMetricsProvider) GetMetricBySelector(ctx context.Context, namespace string, selector labels.Selector, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValueList, error) {
	return nil, fmt.Errorf("karmada-metrics-adapter does not implement custom metrics yet")
}

// ListAllMetrics returns all metrics in all member clusters
func (c *CustomMetricsProvider) ListAllMetrics() []provider.CustomMetricInfo {
	return []provider.CustomMetricInfo{}
}
@@ -0,0 +1,29 @@
package provider

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/metrics/pkg/apis/external_metrics"
	"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
)

// ExternalMetricsProvider is an external metrics provider
type ExternalMetricsProvider struct {
}

// MakeExternalMetricsProvider creates a new external metrics provider
func MakeExternalMetricsProvider() *ExternalMetricsProvider {
	return &ExternalMetricsProvider{}
}

// GetExternalMetric will query metrics by selector from member clusters and return the result
func (c *ExternalMetricsProvider) GetExternalMetric(ctx context.Context, namespace string, metricSelector labels.Selector, info provider.ExternalMetricInfo) (*external_metrics.ExternalMetricValueList, error) {
	return nil, fmt.Errorf("karmada-metrics-adapter does not implement external metrics yet")
}

// ListAllExternalMetrics returns all metrics in all member clusters
func (c *ExternalMetricsProvider) ListAllExternalMetrics() []provider.ExternalMetricInfo {
	return []provider.ExternalMetricInfo{}
}
@ -0,0 +1,668 @@
|
|||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/metrics/pkg/apis/metrics"
|
||||
metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
|
||||
clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1"
|
||||
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
|
||||
"github.com/karmada-io/karmada/pkg/util/helper"
|
||||
)
|
||||
|
||||
const (
|
||||
// labelSelectorAnnotationInternal is the annotation used internal in karmada-metrics-adapter,
|
||||
// to record the selector specified by the user
|
||||
labelSelectorAnnotationInternal = "internal.karmada.io/selector"
|
||||
// namespaceSpecifiedAnnotation is the annotation used in karmada-metrics-adapter,
|
||||
// to record the namespace specified by the user
|
||||
namespaceSpecifiedAnnotation = "internal.karmada.io/namespace"
|
||||
// querySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
|
||||
// record the query source cluster
|
||||
querySourceAnnotationKey = "resource.karmada.io/query-from-cluster"
|
||||
)
|
||||
|
||||
var (
|
||||
// podMetricsGVR is the gvr of pod metrics(v1beta1 version)
|
||||
podMetricsGVR = metricsv1beta1.SchemeGroupVersion.WithResource("pods")
|
||||
// nodeMetricsGVR is the gvr of node metrics(v1beta1 version)
|
||||
nodeMetricsGVR = metricsv1beta1.SchemeGroupVersion.WithResource("nodes")
|
||||
// PodsGVR is the gvr of pods
|
||||
PodsGVR = corev1.SchemeGroupVersion.WithResource("pods")
|
||||
// NodesGVR is the gvr of nodes
|
||||
NodesGVR = corev1.SchemeGroupVersion.WithResource("nodes")
|
||||
)
|
||||
|
||||
type queryResourceFromClustersFunc func(sci genericmanager.SingleClusterInformerManager, clusterName string) error
|
||||
type queryMetricsFromClustersFunc func(sci genericmanager.SingleClusterInformerManager, clusterName string) (interface{}, error)
|
||||
|
||||
// ResourceMetricsProvider is a resource metrics provider, to provide cpu/memory metrics
|
||||
type ResourceMetricsProvider struct {
|
||||
PodLister *PodLister
|
||||
NodeLister *NodeLister
|
||||
|
||||
clusterLister clusterlister.ClusterLister
|
||||
informerManager genericmanager.MultiClusterInformerManager
|
||||
}
|
||||
|
||||
// NewResourceMetricsProvider creates a new resource metrics provider
|
||||
func NewResourceMetricsProvider(clusterLister clusterlister.ClusterLister, informerManager genericmanager.MultiClusterInformerManager) *ResourceMetricsProvider {
|
||||
return &ResourceMetricsProvider{
|
||||
clusterLister: clusterLister,
|
||||
informerManager: informerManager,
|
||||
PodLister: NewPodLister(clusterLister, informerManager),
|
||||
NodeLister: NewNodeLister(clusterLister, informerManager),
|
||||
}
|
||||
}
|
||||
|
||||
// getMetricsParallel is a parallel func to query metrics from member clusters
|
||||
func (r *ResourceMetricsProvider) getMetricsParallel(resourceFunc queryResourceFromClustersFunc,
|
||||
metricsFunc queryMetricsFromClustersFunc) ([]interface{}, error) {
|
||||
clusters, err := r.clusterLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list clusters: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// step 1. Find out the target clusters in lister cache
|
||||
var targetClusters []string
|
||||
for _, cluster := range clusters {
|
||||
sci := r.informerManager.GetSingleClusterManager(cluster.Name)
|
||||
if sci == nil {
|
||||
klog.Errorf("Failed to get cluster(%s) manager", cluster.Name)
|
||||
continue
|
||||
}
|
||||
err := resourceFunc(sci, cluster.Name)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.Errorf("Failed to query resource in cluster(%s): %v", cluster.Name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
targetClusters = append(targetClusters, cluster.Name)
|
||||
}
|
||||
|
||||
var metrics []interface{}
|
||||
if len(targetClusters) == 0 {
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// step 2. Query metrics from the filtered target clusters
|
||||
metricsChanel := make(chan interface{})
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, clusterName := range targetClusters {
|
||||
wg.Add(1)
|
||||
|
||||
go func(cluster string) {
|
||||
defer wg.Done()
|
||||
|
||||
sci := r.informerManager.GetSingleClusterManager(cluster)
|
||||
if sci == nil {
|
||||
klog.Errorf("Failed to get cluster(%s) manager", cluster)
|
||||
return
|
||||
}
|
||||
|
||||
metrics, err := metricsFunc(sci, cluster)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.Errorf("Failed to query metrics in cluster(%s): %v", cluster, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If there are multiple metrics with same name, it's ok because it's an array instead of a map.
|
||||
// The HPA controller will calculate the average utilization with the array.
|
||||
metricsChanel <- metrics
|
||||
}(clusterName)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(metricsChanel)
|
||||
}()
|
||||
|
||||
for {
|
||||
data, ok := <-metricsChanel
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
metrics = append(metrics, data)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
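getMetricsParallel above fans the query out to one goroutine per target cluster and fans the results back in over a channel that is closed once the WaitGroup drains. For readers unfamiliar with the idiom, here is a stripped-down sketch of the same pattern; queryCluster and the cluster names are placeholders, not part of this change:

// Sketch of the fan-out/fan-in pattern used by getMetricsParallel.
package main

import (
	"fmt"
	"sync"
)

// queryCluster stands in for the per-cluster metrics query.
func queryCluster(name string) (string, error) {
	return "metrics-from-" + name, nil
}

func main() {
	clusters := []string{"member1", "member2", "member3"}
	results := make(chan string)

	var wg sync.WaitGroup
	for _, name := range clusters {
		wg.Add(1)
		go func(cluster string) {
			defer wg.Done()
			data, err := queryCluster(cluster)
			if err != nil {
				// A failed cluster is skipped; the remaining results are still merged.
				return
			}
			results <- data
		}(name)
	}

	// Close the channel once every worker has finished so the range below terminates.
	go func() {
		wg.Wait()
		close(results)
	}()

	var merged []string
	for data := range results {
		merged = append(merged, data)
	}
	fmt.Println(merged)
}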
|
||||
|
||||
// queryPodMetricsByName queries metrics by pod name from target clusters
|
||||
func (r *ResourceMetricsProvider) queryPodMetricsByName(name, namespace string) ([]metrics.PodMetrics, error) {
|
||||
resourceQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) error {
|
||||
_, err := sci.Lister(PodsGVR).ByNamespace(namespace).Get(name)
|
||||
return err
|
||||
}
|
||||
metricsQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) (interface{}, error) {
|
||||
metrics, err := sci.GetClient().Resource(podMetricsGVR).
|
||||
Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{})
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
metricsQuery, err := r.getMetricsParallel(resourceQueryFunc, metricsQueryFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var podMetrics []metrics.PodMetrics
|
||||
for index := range metricsQuery {
|
||||
internalMetrics, err := metricsConvertV1beta1PodToInternalPod(*metricsQuery[index].(*unstructured.Unstructured))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
podMetrics = append(podMetrics, internalMetrics...)
|
||||
}
|
||||
|
||||
return podMetrics, nil
|
||||
}
|
||||
|
||||
// queryPodMetricsBySelector queries metrics by pod selector from target clusters
|
||||
func (r *ResourceMetricsProvider) queryPodMetricsBySelector(selector, namespace string) ([]metrics.PodMetrics, error) {
|
||||
labelSelector, err := labels.Parse(selector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to parse label selector: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resourceQueryFunc := func(sci genericmanager.SingleClusterInformerManager, clusterName string) error {
|
||||
pods, err := sci.Lister(PodsGVR).ByNamespace(namespace).List(labelSelector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list pods in cluster(%s): %v", clusterName, err)
|
||||
return err
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
return errors.NewNotFound(PodsGVR.GroupResource(), "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
metricsQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) (interface{}, error) {
|
||||
metrics, err := sci.GetClient().Resource(podMetricsGVR).
|
||||
Namespace(namespace).List(context.Background(), metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
})
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
metricsQuery, err := r.getMetricsParallel(resourceQueryFunc, metricsQueryFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var podMetrics []metrics.PodMetrics
|
||||
for index := range metricsQuery {
|
||||
metricsData := metricsQuery[index].(*unstructured.UnstructuredList)
|
||||
internalMetrics, err := metricsConvertV1beta1PodToInternalPod(metricsData.Items...)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
podMetrics = append(podMetrics, internalMetrics...)
|
||||
}
|
||||
|
||||
return podMetrics, nil
|
||||
}
|
||||
|
||||
// queryNodeMetricsByName queries metrics by node name from target clusters
|
||||
func (r *ResourceMetricsProvider) queryNodeMetricsByName(name string) ([]metrics.NodeMetrics, error) {
|
||||
resourceQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) error {
|
||||
_, err := sci.Lister(NodesGVR).Get(name)
|
||||
return err
|
||||
}
|
||||
metricsQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) (interface{}, error) {
|
||||
metrics, err := sci.GetClient().Resource(nodeMetricsGVR).Get(context.Background(), name, metav1.GetOptions{})
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
metricsQuery, err := r.getMetricsParallel(resourceQueryFunc, metricsQueryFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var nodeMetrics []metrics.NodeMetrics
|
||||
for index := range metricsQuery {
|
||||
internalMetrics, err := metricsConvertV1beta1NodeToInternalNode(*metricsQuery[index].(*unstructured.Unstructured))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
nodeMetrics = append(nodeMetrics, internalMetrics...)
|
||||
}
|
||||
|
||||
return nodeMetrics, nil
|
||||
}
|
||||
|
||||
// queryNodeMetricsBySelector queries metrics by node selector from target clusters
|
||||
func (r *ResourceMetricsProvider) queryNodeMetricsBySelector(selector string) ([]metrics.NodeMetrics, error) {
|
||||
labelSelector, err := labels.Parse(selector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to parse label selector: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resourceQueryFunc := func(sci genericmanager.SingleClusterInformerManager, clusterName string) error {
|
||||
nodes, err := sci.Lister(NodesGVR).List(labelSelector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list pods in cluster(%s): %v", clusterName, err)
|
||||
return err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return errors.NewNotFound(PodsGVR.GroupResource(), "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
metricsQueryFunc := func(sci genericmanager.SingleClusterInformerManager, _ string) (interface{}, error) {
|
||||
metrics, err := sci.GetClient().Resource(nodeMetricsGVR).List(context.Background(), metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
})
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
metricsQuery, err := r.getMetricsParallel(resourceQueryFunc, metricsQueryFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var nodeMetrics []metrics.NodeMetrics
|
||||
for index := range metricsQuery {
|
||||
metricsData := metricsQuery[index].(*unstructured.UnstructuredList)
|
||||
internalMetrics, err := metricsConvertV1beta1NodeToInternalNode(metricsData.Items...)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
nodeMetrics = append(nodeMetrics, internalMetrics...)
|
||||
}
|
||||
|
||||
return nodeMetrics, nil
|
||||
}
|
||||
|
||||
// GetPodMetrics queries metrics for the internally constructed pods
func (r *ResourceMetricsProvider) GetPodMetrics(pods ...*metav1.PartialObjectMetadata) ([]metrics.PodMetrics, error) {
	var ret []metrics.PodMetrics
	if len(pods) == 0 {
		return ret, nil
	}

	podsKeyMap := map[string]string{}
	for _, pod := range pods {
		podKey := generateNamespaceNameKey(pod.Namespace, pod.Name)
		podsKeyMap[podKey] = pod.Annotations[querySourceAnnotationKey]
	}

	var queryData []metrics.PodMetrics
	var err error
	// The annotations are constructed in the previous step, so they cannot be nil here.
	if _, ok := pods[0].Annotations[labelSelectorAnnotationInternal]; ok {
		namespace := pods[0].Annotations[namespaceSpecifiedAnnotation]
		selectorStr := pods[0].Annotations[labelSelectorAnnotationInternal]
		queryData, err = r.queryPodMetricsBySelector(selectorStr, namespace)
	} else {
		queryData, err = r.queryPodMetricsByName(pods[0].Name, pods[0].Namespace)
	}

	if err != nil {
		return nil, err
	}

	for _, i := range queryData {
		podKey := generateNamespaceNameKey(i.Namespace, i.Name)
		if queryCluster, ok := podsKeyMap[podKey]; ok {
			if i.Annotations == nil {
				i.Annotations = make(map[string]string)
			}
			i.Annotations[querySourceAnnotationKey] = queryCluster
			ret = append(ret, i)
		}
	}

	return ret, nil
}
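// Illustrative sketch (editorial, not part of the original change): how the
// annotation-based dispatch above is typically exercised. PodLister.List stamps
// the internal annotations onto the partial pod objects, and GetPodMetrics then
// chooses between the selector-based and the name-based query path. The names
// "lister" and "provider" below are assumptions for the example only.
//
//	objs, _ := lister.ByNamespace("default").List(labels.Everything())
//	pods := make([]*metav1.PartialObjectMetadata, 0, len(objs))
//	for _, obj := range objs {
//		pods = append(pods, obj.(*metav1.PartialObjectMetadata))
//	}
//	podMetrics, err := provider.GetPodMetrics(pods...)
//	if err != nil {
//		klog.Errorf("Failed to query pod metrics: %v", err)
//	}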
// GetNodeMetrics queries metrics for the internally constructed nodes
func (r *ResourceMetricsProvider) GetNodeMetrics(nodes ...*corev1.Node) ([]metrics.NodeMetrics, error) {
	var ret []metrics.NodeMetrics
	if len(nodes) == 0 {
		return ret, nil
	}

	nodesKeyMap := map[string]string{}
	for _, node := range nodes {
		nodeKey := generateNamespaceNameKey("", node.Name)
		nodesKeyMap[nodeKey] = node.Annotations[querySourceAnnotationKey]
	}

	var queryData []metrics.NodeMetrics
	var err error
	// The annotations are constructed in the previous step, so they cannot be nil here.
	if _, ok := nodes[0].Annotations[labelSelectorAnnotationInternal]; ok {
		selectorStr := nodes[0].Annotations[labelSelectorAnnotationInternal]
		queryData, err = r.queryNodeMetricsBySelector(selectorStr)
	} else {
		queryData, err = r.queryNodeMetricsByName(nodes[0].Name)
	}

	if err != nil {
		return nil, err
	}

	for _, i := range queryData {
		nodeKey := generateNamespaceNameKey(i.Namespace, i.Name)
		if cluster, ok := nodesKeyMap[nodeKey]; ok {
			if i.Annotations == nil {
				i.Annotations = make(map[string]string)
			}
			i.Annotations[querySourceAnnotationKey] = cluster
			ret = append(ret, i)
		}
	}

	return ret, nil
}
// PodLister is an internal lister for pods
type PodLister struct {
	namespaceSpecified string
	clusterLister      clusterlister.ClusterLister
	informerManager    genericmanager.MultiClusterInformerManager
}

// NewPodLister creates a new internal PodLister
func NewPodLister(clusterLister clusterlister.ClusterLister, informerManager genericmanager.MultiClusterInformerManager) *PodLister {
	return &PodLister{
		clusterLister:   clusterLister,
		informerManager: informerManager,
	}
}

// List returns the internally constructed pods carrying the label selector info
func (p *PodLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	klog.V(4).Infof("List query pods with selector: %s", selector.String())

	clusters, err := p.clusterLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	for _, cluster := range clusters {
		sci := p.informerManager.GetSingleClusterManager(cluster.Name)
		if sci == nil {
			klog.Errorf("Failed to get SingleClusterInformerManager for cluster(%s)", cluster.Name)
			continue
		}
		pods, err := sci.Lister(PodsGVR).ByNamespace(p.namespaceSpecified).List(selector)
		if err != nil {
			klog.Errorf("Failed to list pods from cluster(%s) in namespace(%s): %v", cluster.Name, p.namespaceSpecified, err)
			return nil, err
		}
		for _, pod := range pods {
			podTyped := &corev1.Pod{}
			err = helper.ConvertToTypedObject(pod, podTyped)
			if err != nil {
				klog.Errorf("Failed to convert to typed object: %v", err)
				return nil, err
			}
			podPartial := p.convertToPodPartialData(podTyped, selector.String(), true)
			podPartial.Annotations[querySourceAnnotationKey] = cluster.Name
			ret = append(ret, podPartial)
		}
	}

	return ret, nil
}
// convertToPodPartialData converts a pod to partial metadata
func (p *PodLister) convertToPodPartialData(pod *corev1.Pod, selector string, labelSelector bool) *metav1.PartialObjectMetadata {
	ret := &metav1.PartialObjectMetadata{
		TypeMeta:   pod.TypeMeta,
		ObjectMeta: pod.ObjectMeta,
	}
	if ret.Annotations == nil {
		ret.Annotations = map[string]string{}
	}

	// If the user set these annotations, remove them to avoid mis-parsing them later.
	if !labelSelector {
		delete(ret.Annotations, namespaceSpecifiedAnnotation)
		delete(ret.Annotations, labelSelectorAnnotationInternal)
		return ret
	}
	ret.Annotations[labelSelectorAnnotationInternal] = selector
	ret.Annotations[namespaceSpecifiedAnnotation] = p.namespaceSpecified

	return ret
}
// Get returns the internally constructed pod with name info
func (p *PodLister) Get(name string) (runtime.Object, error) {
	klog.V(4).Infof("Query pod in namespace(%s) with name: %s", p.namespaceSpecified, name)

	clusters, err := p.clusterLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var podPartial *metav1.PartialObjectMetadata
	for _, cluster := range clusters {
		sci := p.informerManager.GetSingleClusterManager(cluster.Name)
		if sci == nil {
			klog.Errorf("Failed to get SingleClusterInformerManager for cluster(%s)", cluster.Name)
			continue
		}
		pod, err := sci.Lister(PodsGVR).ByNamespace(p.namespaceSpecified).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				klog.Errorf("Failed to get pod from cluster(%s) in namespace(%s): %v", cluster.Name, p.namespaceSpecified, err)
			}
			continue
		}

		if podPartial != nil {
			err := fmt.Errorf("the pod(%s) was found in more than one cluster", name)
			return nil, errors.NewConflict(PodsGVR.GroupResource(), name, err)
		}
		podTyped := &corev1.Pod{}
		err = helper.ConvertToTypedObject(pod, podTyped)
		if err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}
		podPartial = p.convertToPodPartialData(podTyped, "", false)
		podPartial.Annotations[querySourceAnnotationKey] = cluster.Name
	}

	if podPartial != nil {
		return podPartial, nil
	}

	return nil, errors.NewNotFound(PodsGVR.GroupResource(), name)
}
// ByNamespace returns the pod lister with namespace info
func (p *PodLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
	klog.V(4).Infof("Query Pods in namespace: %s", namespace)

	listerCopy := &PodLister{
		clusterLister:   p.clusterLister,
		informerManager: p.informerManager,
	}
	listerCopy.namespaceSpecified = namespace
	return listerCopy
}
// NodeLister is an internal lister for nodes
type NodeLister struct {
	clusterLister   clusterlister.ClusterLister
	informerManager genericmanager.MultiClusterInformerManager
}

// NewNodeLister creates a new internal NodeLister
func NewNodeLister(clusterLister clusterlister.ClusterLister, informerManager genericmanager.MultiClusterInformerManager) *NodeLister {
	return &NodeLister{
		clusterLister:   clusterLister,
		informerManager: informerManager,
	}
}
// List returns the internally constructed nodes carrying the label selector info
func (n *NodeLister) List(selector labels.Selector) (ret []*corev1.Node, err error) {
	klog.V(4).Infof("Query node metrics with selector: %s", selector.String())

	clusters, err := n.clusterLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	for _, cluster := range clusters {
		sci := n.informerManager.GetSingleClusterManager(cluster.Name)
		if sci == nil {
			klog.Errorf("Failed to get SingleClusterInformerManager for cluster(%s)", cluster.Name)
			continue
		}
		nodes, err := sci.Lister(NodesGVR).List(selector)
		if err != nil {
			klog.Errorf("Failed to list nodes from cluster(%s): %v", cluster.Name, err)
			return nil, err
		}
		for index := range nodes {
			nodeTyped := &corev1.Node{}
			err = helper.ConvertToTypedObject(nodes[index], nodeTyped)
			if err != nil {
				klog.Errorf("Failed to convert to typed object: %v", err)
				return nil, err
			}
			if nodeTyped.Annotations == nil {
				nodeTyped.Annotations = map[string]string{}
			}

			// If the user set this annotation, reset it here.
			nodeTyped.Annotations[labelSelectorAnnotationInternal] = selector.String()
			nodeTyped.Annotations[querySourceAnnotationKey] = cluster.Name
			ret = append(ret, nodeTyped)
		}
	}

	return ret, nil
}
// Get returns the internally constructed node with name info
func (n *NodeLister) Get(name string) (*corev1.Node, error) {
	klog.V(4).Infof("Query node metrics with name: %s", name)

	clusters, err := n.clusterLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var nodeTyped *corev1.Node
	for _, cluster := range clusters {
		sci := n.informerManager.GetSingleClusterManager(cluster.Name)
		if sci == nil {
			klog.Errorf("Failed to get SingleClusterInformerManager for cluster(%s)", cluster.Name)
			continue
		}
		node, err := sci.Lister(NodesGVR).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				klog.Errorf("Failed to get node from cluster(%s): %v", cluster.Name, err)
			}
			continue
		}

		if nodeTyped != nil {
			err := fmt.Errorf("the node(%s) was found in more than one cluster", name)
			return nil, errors.NewConflict(NodesGVR.GroupResource(), name, err)
		}

		// Allocate the target object before converting into it; converting into a
		// nil *corev1.Node would fail.
		nodeTyped = &corev1.Node{}
		err = helper.ConvertToTypedObject(node, nodeTyped)
		if err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}
		if nodeTyped.Annotations == nil {
			nodeTyped.Annotations = map[string]string{}
		}

		// If the user set this annotation, remove it to avoid mis-parsing it later.
		delete(nodeTyped.Annotations, labelSelectorAnnotationInternal)
		nodeTyped.Annotations[querySourceAnnotationKey] = cluster.Name
	}

	if nodeTyped != nil {
		return nodeTyped, nil
	}

	return nil, errors.NewNotFound(NodesGVR.GroupResource(), name)
}
// metricsConvertV1beta1PodToInternalPod converts metricsv1beta1.PodMetrics to metrics.PodMetrics
func metricsConvertV1beta1PodToInternalPod(objs ...unstructured.Unstructured) ([]metrics.PodMetrics, error) {
	var podMetricsV1beta1 []metricsv1beta1.PodMetrics

	for index := range objs {
		single := metricsv1beta1.PodMetrics{}
		if err := helper.ConvertToTypedObject(&objs[index], &single); err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}
		podMetricsV1beta1 = append(podMetricsV1beta1, single)
	}

	var podMetricsInternal []metrics.PodMetrics
	for index := range podMetricsV1beta1 {
		single := metrics.PodMetrics{}
		if err := metricsv1beta1.Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(&podMetricsV1beta1[index], &single, nil); err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}

		podMetricsInternal = append(podMetricsInternal, single)
	}

	return podMetricsInternal, nil
}
// metricsConvertV1beta1NodeToInternalNode converts metricsv1beta1.NodeMetrics to metrics.NodeMetrics
func metricsConvertV1beta1NodeToInternalNode(objs ...unstructured.Unstructured) ([]metrics.NodeMetrics, error) {
	var nodeMetricsV1beta1 []metricsv1beta1.NodeMetrics

	for index := range objs {
		single := metricsv1beta1.NodeMetrics{}
		if err := helper.ConvertToTypedObject(&objs[index], &single); err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}
		nodeMetricsV1beta1 = append(nodeMetricsV1beta1, single)
	}

	var nodeMetricsInternal []metrics.NodeMetrics
	for index := range nodeMetricsV1beta1 {
		single := metrics.NodeMetrics{}
		if err := metricsv1beta1.Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(&nodeMetricsV1beta1[index], &single, nil); err != nil {
			klog.Errorf("Failed to convert to typed object: %v", err)
			return nil, err
		}

		nodeMetricsInternal = append(nodeMetricsInternal, single)
	}

	return nodeMetricsInternal, nil
}
// generateNamespaceNameKey generates a namespace/name key
func generateNamespaceNameKey(namespace, name string) string {
	return namespace + "/" + name
}
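// For illustration (editorial note, not part of the original change): the key
// format follows the usual <namespace>/<name> convention, with a leading "/" for
// cluster-scoped objects such as nodes:
//
//	generateNamespaceNameKey("default", "nginx-0") // "default/nginx-0"
//	generateNamespaceNameKey("", "member1-node")   // "/member1-node"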
@@ -6,6 +6,7 @@ import (
	"reflect"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -190,3 +191,14 @@ func IsClusterIdentifyUnique(controlPlaneClient karmadaclientset.Interface, id s
	}
	return true, "", nil
}

// ClusterAccessCredentialChanged checks whether the cluster access credential changed
func ClusterAccessCredentialChanged(newSpec, oldSpec clusterv1alpha1.ClusterSpec) bool {
	if oldSpec.APIEndpoint == newSpec.APIEndpoint &&
		oldSpec.InsecureSkipTLSVerification == newSpec.InsecureSkipTLSVerification &&
		oldSpec.ProxyURL == newSpec.ProxyURL &&
		equality.Semantic.DeepEqual(oldSpec.ProxyHeader, newSpec.ProxyHeader) {
		return false
	}
	return true
}
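// For illustration (editorial note, not part of the original change): the helper
// reports a change whenever any access-related field differs, for example:
//
//	oldSpec := clusterv1alpha1.ClusterSpec{APIEndpoint: "https://10.0.0.1:6443"}
//	newSpec := clusterv1alpha1.ClusterSpec{APIEndpoint: "https://10.0.0.2:6443"}
//	ClusterAccessCredentialChanged(newSpec, oldSpec) // true
//	ClusterAccessCredentialChanged(oldSpec, oldSpec) // false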
@@ -1,10 +1,21 @@
# Change history of go-restful

## [v3.9.0] - 20221-07-21
## [v3.10.1] - 2022-11-19

- fix broken 3.10.0 by using path package for joining paths

## [v3.10.0] - 2022-10-11 - BROKEN

- changed tokenizer to match std route match behavior; do not trimright the path (#511)
- Add MIME_ZIP (#512)
- Add MIME_ZIP and HEADER_ContentDisposition (#513)
- Changed how to get query parameter issue #510

## [v3.9.0] - 2022-07-21

- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)

## [v3.8.0] - 20221-06-06
## [v3.8.0] - 2022-06-06

- use exact matching of allowed domain entries, issue #489 (#493)
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
@ -7,12 +7,14 @@ package restful
|
|||
const (
|
||||
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
||||
|
||||
HEADER_Allow = "Allow"
|
||||
HEADER_Accept = "Accept"
|
||||
HEADER_Origin = "Origin"
|
||||
HEADER_ContentType = "Content-Type"
|
||||
HEADER_ContentDisposition = "Content-Disposition"
|
||||
HEADER_LastModified = "Last-Modified"
|
||||
HEADER_AcceptEncoding = "Accept-Encoding"
|
||||
HEADER_ContentEncoding = "Content-Encoding"
|
||||
|
|
|
@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
|
|||
// a "Unable to unmarshal content of type:" response is returned.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
//
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
func DefaultRequestContentType(mime string) {
|
||||
defaultRequestContentType = mime
|
||||
}
|
||||
|
@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
|
|||
|
||||
// QueryParameter returns the (first) Query parameter value by its name
|
||||
func (r *Request) QueryParameter(name string) string {
|
||||
return r.Request.FormValue(name)
|
||||
return r.Request.URL.Query().Get(name)
|
||||
}
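// Editorial note on the change above (hedged sketch, not library documentation):
// http.Request.FormValue also parses the request body, whereas URL.Query().Get
// reads only the URL query string, for example:
//
//	req, _ := http.NewRequest("POST", "/users?id=42", strings.NewReader("id=7"))
//	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//	req.FormValue("id")       // "7" (body parameters take precedence)
//	req.URL.Query().Get("id") // "42" (query string only)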
|
||||
|
||||
// QueryParameters returns the all the query parameters values by name
|
||||
|
|
|
@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
|
|||
if DefaultResponseMimeType == MIME_XML {
|
||||
return entityAccessRegistry.accessorAt(MIME_XML)
|
||||
}
|
||||
if DefaultResponseMimeType == MIME_ZIP {
|
||||
return entityAccessRegistry.accessorAt(MIME_ZIP)
|
||||
}
|
||||
// Fallback to whatever the route says it can produce.
|
||||
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
for _, each := range r.routeProduces {
|
||||
|
|
|
@ -164,7 +164,7 @@ func tokenizePath(path string) []string {
|
|||
if "/" == path {
|
||||
return nil
|
||||
}
|
||||
return strings.Split(strings.Trim(path, "/"), "/")
|
||||
return strings.Split(strings.TrimLeft(path, "/"), "/")
|
||||
}
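// Editorial note on the change above: TrimLeft keeps a trailing empty token, so a
// trailing slash is no longer swallowed by the tokenizer, for example:
//
//	strings.Split(strings.Trim("/a/b/", "/"), "/")     // ["a", "b"]
//	strings.Split(strings.TrimLeft("/a/b/", "/"), "/") // ["a", "b", ""]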
|
||||
|
||||
// for debugging
|
||||
|
@ -176,3 +176,5 @@ func (r *Route) String() string {
|
|||
func (r *Route) EnableContentEncoding(enabled bool) {
|
||||
r.contentEncodingEnabled = &enabled
|
||||
}
|
||||
|
||||
var TrimRightSlashEnabled = false
|
||||
|
|
|
@ -7,6 +7,7 @@ package restful
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
@ -46,11 +47,12 @@ type RouteBuilder struct {
|
|||
// Do evaluates each argument with the RouteBuilder itself.
|
||||
// This allows you to follow DRY principles without breaking the fluent programming style.
|
||||
// Example:
|
||||
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
|
||||
//
|
||||
// func Returns500(b *RouteBuilder) {
|
||||
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
|
||||
// }
|
||||
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
|
||||
//
|
||||
// func Returns500(b *RouteBuilder) {
|
||||
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
|
||||
// }
|
||||
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
|
||||
for _, each := range oneArgBlocks {
|
||||
each(b)
|
||||
|
@ -352,7 +354,7 @@ func (b *RouteBuilder) Build() Route {
|
|||
}
|
||||
|
||||
func concatPath(path1, path2 string) string {
|
||||
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
|
||||
return path.Join(path1, path2)
|
||||
}
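// Editorial note on the change above: path.Join also cleans the joined result, so
// duplicate slashes and "." segments collapse instead of being concatenated
// verbatim, for example:
//
//	strings.TrimRight("/users/", "/") + "/" + strings.TrimLeft("/{id}", "/") // "/users/{id}"
//	path.Join("/users//", "./{id}")                                          // "/users/{id}"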
|
||||
|
||||
var anonymousFuncCount int32
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
install:
|
||||
- GO111MODULE=off go get -u gotest.tools/gotestsum
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
language: go
|
||||
notifications:
|
||||
slack:
|
||||
secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
|
||||
script:
|
||||
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
|
|
@ -1,8 +1,6 @@
|
|||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 30
|
||||
maligned:
|
||||
|
@ -12,6 +10,8 @@ linters-settings:
|
|||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 4
|
||||
paralleltest:
|
||||
ignore-missing: true
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
|
@ -39,3 +39,12 @@ linters:
|
|||
- nestif
|
||||
- godot
|
||||
- errorlint
|
||||
- varcheck
|
||||
- interfacer
|
||||
- deadcode
|
||||
- golint
|
||||
- ifshort
|
||||
- structcheck
|
||||
- nosnakecase
|
||||
- varnamelen
|
||||
- exhaustruct
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.x
|
||||
install:
|
||||
- go get gotest.tools/gotestsum
|
||||
jobs:
|
||||
include:
|
||||
# include linting job, but only for latest go version and amd64 arch
|
||||
- go: 1.x
|
||||
arch: amd64
|
||||
install:
|
||||
go get github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
script:
|
||||
- golangci-lint run --new-from-rev master
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
language: go
|
||||
notifications:
|
||||
slack:
|
||||
secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
|
||||
script:
|
||||
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
|
|
@ -7,8 +7,8 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
defaultHttpPort = ":80"
|
||||
defaultHttpsPort = ":443"
|
||||
defaultHTTPPort = ":80"
|
||||
defaultHTTPSPort = ":443"
|
||||
)
|
||||
|
||||
// Regular expressions used by the normalizations
|
||||
|
@ -18,13 +18,14 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
|||
// NormalizeURL will normalize the specified URL
|
||||
// This was added to replace a previous call to the no longer maintained purell library:
|
||||
// The call that was used looked like the following:
|
||||
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
|
||||
//
|
||||
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
|
||||
//
|
||||
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
|
||||
// - FlagLowercaseScheme
|
||||
// - FlagLowercaseHost
|
||||
// - FlagRemoveDefaultPort
|
||||
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
|
||||
// - FlagLowercaseScheme
|
||||
// - FlagLowercaseHost
|
||||
// - FlagRemoveDefaultPort
|
||||
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
|
||||
func NormalizeURL(u *url.URL) {
|
||||
lowercaseScheme(u)
|
||||
lowercaseHost(u)
|
||||
|
@ -48,7 +49,7 @@ func removeDefaultPort(u *url.URL) {
|
|||
if len(u.Host) > 0 {
|
||||
scheme := strings.ToLower(u.Scheme)
|
||||
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
||||
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
||||
if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
@ -141,12 +140,11 @@ func CallerInfo() []string {
|
|||
}
|
||||
|
||||
parts := strings.Split(file, "/")
|
||||
file = parts[len(parts)-1]
|
||||
if len(parts) > 1 {
|
||||
filename := parts[len(parts)-1]
|
||||
dir := parts[len(parts)-2]
|
||||
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
|
||||
path, _ := filepath.Abs(file)
|
||||
callers = append(callers, fmt.Sprintf("%s:%d", path, line))
|
||||
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
|
||||
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
|
|||
[]reflect.Kind{
|
||||
reflect.Chan, reflect.Func,
|
||||
reflect.Interface, reflect.Map,
|
||||
reflect.Ptr, reflect.Slice},
|
||||
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
|
||||
kind)
|
||||
|
||||
if isNilableKind && value.IsNil() {
|
||||
|
@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
|
|||
return true // we consider nil to be equal to the nil set
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ok = false
|
||||
}
|
||||
}()
|
||||
|
||||
listKind := reflect.TypeOf(list).Kind()
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
|
||||
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
if subsetKind == reflect.Map && listKind == reflect.Map {
|
||||
listValue := reflect.ValueOf(list)
|
||||
subsetKeys := subsetValue.MapKeys()
|
||||
subsetMap := reflect.ValueOf(subset)
|
||||
actualMap := reflect.ValueOf(list)
|
||||
|
||||
for i := 0; i < len(subsetKeys); i++ {
|
||||
subsetKey := subsetKeys[i]
|
||||
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
|
||||
listElement := listValue.MapIndex(subsetKey).Interface()
|
||||
for _, k := range subsetMap.MapKeys() {
|
||||
ev := subsetMap.MapIndex(k)
|
||||
av := actualMap.MapIndex(k)
|
||||
|
||||
if !ObjectsAreEqual(subsetElement, listElement) {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
|
||||
if !av.IsValid() {
|
||||
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
|
||||
}
|
||||
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
|
||||
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
for i := 0; i < subsetValue.Len(); i++ {
|
||||
element := subsetValue.Index(i).Interface()
|
||||
subsetList := reflect.ValueOf(subset)
|
||||
for i := 0; i < subsetList.Len(); i++ {
|
||||
element := subsetList.Index(i).Interface()
|
||||
ok, found := containsElement(list, element)
|
||||
if !ok {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
|
||||
}
|
||||
if !found {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
|||
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ok = false
|
||||
}
|
||||
}()
|
||||
|
||||
listKind := reflect.TypeOf(list).Kind()
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
|
||||
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetKind := reflect.TypeOf(subset).Kind()
|
||||
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
|
||||
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
subsetValue := reflect.ValueOf(subset)
|
||||
if subsetKind == reflect.Map && listKind == reflect.Map {
|
||||
listValue := reflect.ValueOf(list)
|
||||
subsetKeys := subsetValue.MapKeys()
|
||||
subsetMap := reflect.ValueOf(subset)
|
||||
actualMap := reflect.ValueOf(list)
|
||||
|
||||
for i := 0; i < len(subsetKeys); i++ {
|
||||
subsetKey := subsetKeys[i]
|
||||
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
|
||||
listElement := listValue.MapIndex(subsetKey).Interface()
|
||||
for _, k := range subsetMap.MapKeys() {
|
||||
ev := subsetMap.MapIndex(k)
|
||||
av := actualMap.MapIndex(k)
|
||||
|
||||
if !ObjectsAreEqual(subsetElement, listElement) {
|
||||
if !av.IsValid() {
|
||||
return true
|
||||
}
|
||||
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
|||
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
|
||||
}
|
||||
|
||||
for i := 0; i < subsetValue.Len(); i++ {
|
||||
element := subsetValue.Index(i).Interface()
|
||||
subsetList := reflect.ValueOf(subset)
|
||||
for i := 0; i < subsetList.Len(); i++ {
|
||||
element := subsetList.Index(i).Interface()
|
||||
ok, found := containsElement(list, element)
|
||||
if !ok {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
||||
|
|
|
@ -218,16 +218,22 @@ func (c *Call) Unset() *Call {
|
|||
|
||||
foundMatchingCall := false
|
||||
|
||||
for i, call := range c.Parent.ExpectedCalls {
|
||||
// in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones
|
||||
var index int // write index
|
||||
for _, call := range c.Parent.ExpectedCalls {
|
||||
if call.Method == c.Method {
|
||||
_, diffCount := call.Arguments.Diff(c.Arguments)
|
||||
if diffCount == 0 {
|
||||
foundMatchingCall = true
|
||||
// Remove from ExpectedCalls
|
||||
c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...)
|
||||
// Remove from ExpectedCalls - just skip it
|
||||
continue
|
||||
}
|
||||
}
|
||||
c.Parent.ExpectedCalls[index] = call
|
||||
index++
|
||||
}
|
||||
// trim slice up to last copied index
|
||||
c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index]
|
||||
|
||||
if !foundMatchingCall {
|
||||
unlockOnce.Do(c.unlock)
|
||||
|
|
|
@ -70,11 +70,14 @@ func SetLogger(logger logr.Logger) {
|
|||
// routing log entries through klogr into klog and then into the actual Logger
|
||||
// backend.
|
||||
func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
|
||||
logging.logger = &logger
|
||||
logging.loggerOptions = loggerOptions{}
|
||||
for _, opt := range opts {
|
||||
opt(&logging.loggerOptions)
|
||||
}
|
||||
logging.logger = &logWriter{
|
||||
Logger: logger,
|
||||
writeKlogBuffer: logging.loggerOptions.writeKlogBuffer,
|
||||
}
|
||||
}
|
||||
|
||||
// ContextualLogger determines whether the logger passed to
|
||||
|
@ -93,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WriteKlogBuffer sets a callback that will be invoked by klog to write output
|
||||
// produced by non-structured log calls like Infof.
|
||||
//
|
||||
// The buffer will contain exactly the same data that klog normally would write
|
||||
// into its own output stream(s). In particular this includes the header, if
|
||||
// klog is configured to write one. The callback then can divert that data into
|
||||
// its own output streams. The buffer may or may not end in a line break.
|
||||
//
|
||||
// Without such a callback, klog will call the logger's Info or Error method
|
||||
// with just the message string (i.e. no header).
|
||||
func WriteKlogBuffer(write func([]byte)) LoggerOption {
|
||||
return func(o *loggerOptions) {
|
||||
o.writeKlogBuffer = write
|
||||
}
|
||||
}
|
||||
|
||||
// LoggerOption implements the functional parameter paradigm for
|
||||
// SetLoggerWithOptions.
|
||||
type LoggerOption func(o *loggerOptions)
|
||||
|
@ -100,6 +119,13 @@ type LoggerOption func(o *loggerOptions)
|
|||
type loggerOptions struct {
|
||||
contextualLogger bool
|
||||
flush func()
|
||||
writeKlogBuffer func([]byte)
|
||||
}
|
||||
|
||||
// logWriter combines a logger (always set) with a write callback (optional).
|
||||
type logWriter struct {
|
||||
Logger
|
||||
writeKlogBuffer func([]byte)
|
||||
}
|
||||
|
||||
// ClearLogger removes a backing Logger implementation if one was set earlier
|
||||
|
@ -152,7 +178,7 @@ func Background() Logger {
|
|||
if logging.loggerOptions.contextualLogger {
|
||||
// Is non-nil because logging.loggerOptions.contextualLogger is
|
||||
// only true if a logger was set.
|
||||
return *logging.logger
|
||||
return logging.logger.Logger
|
||||
}
|
||||
|
||||
return klogLogger
|
||||
|
|
|
@ -40,44 +40,33 @@ type Buffer struct {
|
|||
next *Buffer
|
||||
}
|
||||
|
||||
// Buffers manages the reuse of individual buffer instances. It is thread-safe.
|
||||
type Buffers struct {
|
||||
// mu protects the free list. It is separate from the main mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
mu sync.Mutex
|
||||
|
||||
// freeList is a list of byte buffers, maintained under mu.
|
||||
freeList *Buffer
|
||||
var buffers = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// GetBuffer returns a new, ready-to-use buffer.
|
||||
func (bl *Buffers) GetBuffer() *Buffer {
|
||||
bl.mu.Lock()
|
||||
b := bl.freeList
|
||||
if b != nil {
|
||||
bl.freeList = b.next
|
||||
}
|
||||
bl.mu.Unlock()
|
||||
if b == nil {
|
||||
b = new(Buffer)
|
||||
} else {
|
||||
b.next = nil
|
||||
b.Reset()
|
||||
}
|
||||
func GetBuffer() *Buffer {
|
||||
b := buffers.Get().(*Buffer)
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
// PutBuffer returns a buffer to the free list.
|
||||
func (bl *Buffers) PutBuffer(b *Buffer) {
|
||||
func PutBuffer(b *Buffer) {
|
||||
if b.Len() >= 256 {
|
||||
// Let big buffers die a natural death.
|
||||
// Let big buffers die a natural death, without relying on
|
||||
// sync.Pool behavior. The documentation implies that items may
|
||||
// get deallocated while stored there ("If the Pool holds the
|
||||
// only reference when this [= be removed automatically]
|
||||
// happens, the item might be deallocated."), but
|
||||
// https://github.com/golang/go/issues/23199 leans more towards
|
||||
// having such a size limit.
|
||||
return
|
||||
}
|
||||
bl.mu.Lock()
|
||||
b.next = bl.freeList
|
||||
bl.freeList = b
|
||||
bl.mu.Unlock()
|
||||
|
||||
buffers.Put(b)
|
||||
}
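// Editorial note (hedged sketch): with the sync.Pool based free list, callers pair
// GetBuffer with PutBuffer, and the pool replaces the hand-rolled, mutex-protected
// linked list, for example:
//
//	buf := buffer.GetBuffer()
//	buf.WriteString("log line")
//	// ... hand buf.Bytes() to the output path ...
//	buffer.PutBuffer(buf) // buffers of 256 bytes or more are dropped, not pooled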
|
||||
|
||||
// Some custom tiny helper functions to print the log header efficiently.
|
||||
|
@ -121,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int {
|
|||
return copy(buf.Tmp[i:], buf.Tmp[j:])
|
||||
}
|
||||
|
||||
// FormatHeader formats a log header using the provided file name and line number.
|
||||
// FormatHeader formats a log header using the provided file name and line number
|
||||
// and writes it into the buffer.
|
||||
func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
|
||||
if line < 0 {
|
||||
line = 0 // not a real line number, but acceptable to someDigits
|
||||
|
@ -157,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now
|
|||
buf.Tmp[n+2] = ' '
|
||||
buf.Write(buf.Tmp[:n+3])
|
||||
}
|
||||
|
||||
// SprintHeader formats a log header and returns a string. This is a simpler
|
||||
// version of FormatHeader for use in ktesting.
|
||||
func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
|
||||
if s > severity.FatalLog {
|
||||
s = severity.InfoLog // for safety.
|
||||
}
|
||||
|
||||
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
|
||||
// It's worth about 3X. Fprintf is hard.
|
||||
_, month, day := now.Date()
|
||||
hour, minute, second := now.Clock()
|
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
buf.Tmp[0] = severity.Char[s]
|
||||
buf.twoDigits(1, int(month))
|
||||
buf.twoDigits(3, day)
|
||||
buf.Tmp[5] = ' '
|
||||
buf.twoDigits(6, hour)
|
||||
buf.Tmp[8] = ':'
|
||||
buf.twoDigits(9, minute)
|
||||
buf.Tmp[11] = ':'
|
||||
buf.twoDigits(12, second)
|
||||
buf.Tmp[14] = '.'
|
||||
buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
|
||||
buf.Tmp[21] = ']'
|
||||
return string(buf.Tmp[:22])
|
||||
}
|
||||
|
|
|
@ -24,6 +24,10 @@ import (
|
|||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
type textWriter interface {
|
||||
WriteText(*bytes.Buffer)
|
||||
}
|
||||
|
||||
// WithValues implements LogSink.WithValues. The old key/value pairs are
|
||||
// assumed to be well-formed, the new ones are checked and padded if
|
||||
// necessary. It returns a new slice.
|
||||
|
@ -91,11 +95,66 @@ func MergeKVs(first, second []interface{}) []interface{} {
|
|||
return merged
|
||||
}
|
||||
|
||||
type Formatter struct {
|
||||
AnyToStringHook AnyToStringFunc
|
||||
}
|
||||
|
||||
type AnyToStringFunc func(v interface{}) string
|
||||
|
||||
// MergeKVsInto is a variant of MergeKVs which directly formats the key/value
|
||||
// pairs into a buffer.
|
||||
func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
|
||||
if len(first) == 0 && len(second) == 0 {
|
||||
// Nothing to do at all.
|
||||
return
|
||||
}
|
||||
|
||||
if len(first) == 0 && len(second)%2 == 0 {
|
||||
// Nothing to be overridden, second slice is well-formed
|
||||
// and can be used directly.
|
||||
for i := 0; i < len(second); i += 2 {
|
||||
f.KVFormat(b, second[i], second[i+1])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Determine which keys are in the second slice so that we can skip
|
||||
// them when iterating over the first one. The code intentionally
|
||||
// favors performance over completeness: we assume that keys are string
|
||||
// constants and thus compare equal when the string values are equal. A
|
||||
// string constant being overridden by, for example, a fmt.Stringer is
|
||||
// not handled.
|
||||
overrides := map[interface{}]bool{}
|
||||
for i := 0; i < len(second); i += 2 {
|
||||
overrides[second[i]] = true
|
||||
}
|
||||
for i := 0; i < len(first); i += 2 {
|
||||
key := first[i]
|
||||
if overrides[key] {
|
||||
continue
|
||||
}
|
||||
f.KVFormat(b, key, first[i+1])
|
||||
}
|
||||
// Round down.
|
||||
l := len(second)
|
||||
l = l / 2 * 2
|
||||
for i := 1; i < l; i += 2 {
|
||||
f.KVFormat(b, second[i-1], second[i])
|
||||
}
|
||||
if len(second)%2 == 1 {
|
||||
f.KVFormat(b, second[len(second)-1], missingValue)
|
||||
}
|
||||
}
|
||||
|
||||
func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
|
||||
Formatter{}.MergeAndFormatKVs(b, first, second)
|
||||
}
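// Editorial note (hedged sketch): keys in the second list override the same keys in
// the first, and a trailing key without a value is padded with "(MISSING)", e.g.:
//
//	var b bytes.Buffer
//	serialize.MergeAndFormatKVs(&b, []interface{}{"a", 1, "b", 2}, []interface{}{"b", 3})
//	// b.String() == " a=1 b=3"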
|
||||
|
||||
const missingValue = "(MISSING)"
|
||||
|
||||
// KVListFormat serializes all key/value pairs into the provided buffer.
|
||||
// A space gets inserted before the first pair and between each pair.
|
||||
func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
for i := 0; i < len(keysAndValues); i += 2 {
|
||||
var v interface{}
|
||||
k := keysAndValues[i]
|
||||
|
@ -104,69 +163,93 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
|||
} else {
|
||||
v = missingValue
|
||||
}
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, true, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", value))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", v))
|
||||
}
|
||||
f.KVFormat(b, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
Formatter{}.KVListFormat(b, keysAndValues...)
|
||||
}
|
||||
|
||||
// KVFormat serializes one key/value pair into the provided buffer.
|
||||
// A space gets inserted before the pair.
|
||||
func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case textWriter:
|
||||
writeTextWriterValue(b, v)
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, true, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, f.AnyToString(value))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
writeStringValue(b, false, f.AnyToString(v))
|
||||
}
|
||||
}
|
||||
|
||||
func KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
Formatter{}.KVFormat(b, k, v)
|
||||
}
|
||||
|
||||
// AnyToString is the historic fallback formatter.
|
||||
func (f Formatter) AnyToString(v interface{}) string {
|
||||
if f.AnyToStringHook != nil {
|
||||
return f.AnyToStringHook(v)
|
||||
}
|
||||
return fmt.Sprintf("%+v", v)
|
||||
}
|
||||
|
||||
// StringerToString converts a Stringer to a string,
|
||||
// handling panics if they occur.
|
||||
func StringerToString(s fmt.Stringer) (ret string) {
|
||||
|
@ -203,6 +286,16 @@ func ErrorToString(err error) (ret string) {
|
|||
return
|
||||
}
|
||||
|
||||
func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
|
||||
b.WriteRune('=')
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintf(b, `"<panic: %s>"`, err)
|
||||
}
|
||||
}()
|
||||
v.WriteText(b)
|
||||
}
|
||||
|
||||
func writeStringValue(b *bytes.Buffer, quote bool, v string) {
|
||||
data := []byte(v)
|
||||
index := bytes.IndexByte(data, '\n')
|
||||
|
|
|
@ -17,8 +17,10 @@ limitations under the License.
|
|||
package klog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
@ -31,11 +33,30 @@ type ObjectRef struct {
|
|||
|
||||
func (ref ObjectRef) String() string {
|
||||
if ref.Namespace != "" {
|
||||
return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
|
||||
var builder strings.Builder
|
||||
builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
|
||||
builder.WriteString(ref.Namespace)
|
||||
builder.WriteRune('/')
|
||||
builder.WriteString(ref.Name)
|
||||
return builder.String()
|
||||
}
|
||||
return ref.Name
|
||||
}
|
||||
|
||||
func (ref ObjectRef) WriteText(out *bytes.Buffer) {
|
||||
out.WriteRune('"')
|
||||
ref.writeUnquoted(out)
|
||||
out.WriteRune('"')
|
||||
}
|
||||
|
||||
func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
|
||||
if ref.Namespace != "" {
|
||||
out.WriteString(ref.Namespace)
|
||||
out.WriteRune('/')
|
||||
}
|
||||
out.WriteString(ref.Name)
|
||||
}
|
||||
|
||||
// MarshalLog ensures that loggers with support for structured output will log
|
||||
// as a struct by removing the String method via a custom type.
|
||||
func (ref ObjectRef) MarshalLog() interface{} {
|
||||
|
@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
|
|||
var _ logr.Marshaler = kobjSlice{}
|
||||
|
||||
func (ks kobjSlice) String() string {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
objectRefs, errStr := ks.process()
|
||||
if errStr != "" {
|
||||
return errStr
|
||||
}
|
||||
return fmt.Sprintf("%v", objectRefs)
|
||||
}
|
||||
|
||||
func (ks kobjSlice) MarshalLog() interface{} {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
objectRefs, errStr := ks.process()
|
||||
if errStr != "" {
|
||||
return errStr
|
||||
}
|
||||
return objectRefs
|
||||
}
|
||||
|
||||
func (ks kobjSlice) process() ([]interface{}, error) {
|
||||
func (ks kobjSlice) process() (objs []interface{}, err string) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as nil.
|
||||
return nil, nil
|
||||
return nil, ""
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
default:
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
|
||||
return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
|
||||
}
|
||||
objectRefs := make([]interface{}, 0, s.Len())
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
|
@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
|
|||
} else if v, ok := item.(KMetadata); ok {
|
||||
objectRefs = append(objectRefs, KObj(v))
|
||||
} else {
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
}
|
||||
}
|
||||
return objectRefs, ""
|
||||
}
|
||||
|
||||
var nilToken = []byte("<nil>")
|
||||
|
||||
func (ks kobjSlice) WriteText(out *bytes.Buffer) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as empty slice.
|
||||
out.WriteString("[]")
|
||||
return
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
default:
|
||||
fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
|
||||
return
|
||||
}
|
||||
out.Write([]byte{'['})
|
||||
defer out.Write([]byte{']'})
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
if i > 0 {
|
||||
out.Write([]byte{' '})
|
||||
}
|
||||
item := s.Index(i).Interface()
|
||||
if item == nil {
|
||||
out.Write(nilToken)
|
||||
} else if v, ok := item.(KMetadata); ok {
|
||||
KObj(v).writeUnquoted(out)
|
||||
} else {
|
||||
fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
return
|
||||
}
|
||||
}
|
||||
return objectRefs, nil
|
||||
}
|
||||
|
|
|
@ -91,8 +91,6 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
"k8s.io/klog/v2/internal/buffer"
|
||||
"k8s.io/klog/v2/internal/clock"
|
||||
"k8s.io/klog/v2/internal/dbg"
|
||||
|
@ -453,7 +451,7 @@ type settings struct {
|
|||
|
||||
// logger is the global Logger chosen by users of klog, nil if
|
||||
// none is available.
|
||||
logger *Logger
|
||||
logger *logWriter
|
||||
|
||||
// loggerOptions contains the options that were supplied for
|
||||
// globalLogger.
|
||||
|
@ -525,6 +523,11 @@ func (s settings) deepCopy() settings {
|
|||
}
|
||||
s.vmodule.filter = filter
|
||||
|
||||
if s.logger != nil {
|
||||
logger := *s.logger
|
||||
s.logger = &logger
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
|
@ -532,11 +535,6 @@ func (s settings) deepCopy() settings {
|
|||
type loggingT struct {
|
||||
settings
|
||||
|
||||
// bufferCache maintains the free list. It uses its own mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
bufferCache buffer.Buffers
|
||||
|
||||
// flushD holds a flushDaemon that frequently flushes log file buffers.
|
||||
// Uses its own mutex.
|
||||
flushD *flushDaemon
|
||||
|
@ -664,7 +662,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
|
|||
|
||||
// formatHeader formats a log header using the provided file name and line number.
|
||||
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
|
||||
buf := l.bufferCache.GetBuffer()
|
||||
buf := buffer.GetBuffer()
|
||||
if l.skipHeaders {
|
||||
return buf
|
||||
}
|
||||
|
@ -673,17 +671,18 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf
|
|||
return buf
|
||||
}
|
||||
|
||||
func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
|
||||
func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
|
||||
l.printlnDepth(s, logger, filter, 1, args...)
|
||||
}
|
||||
|
||||
func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
|
||||
func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
|
||||
buf, file, line := l.header(s, depth)
|
||||
// if logger is set, we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
if logger != nil && logger.writeKlogBuffer == nil {
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
|
@ -692,17 +691,18 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter
|
|||
l.output(s, logger, buf, depth, file, line, false)
|
||||
}
|
||||
|
||||
func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
|
||||
func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
|
||||
l.printDepth(s, logger, filter, 1, args...)
|
||||
}
|
||||
|
||||
func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
|
||||
func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
|
||||
buf, file, line := l.header(s, depth)
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
if logger != nil && logger.writeKlogBuffer == nil {
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
|
@ -714,17 +714,18 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L
|
|||
l.output(s, logger, buf, depth, file, line, false)
|
||||
}
|
||||
|
||||
func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) {
|
||||
func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) {
|
||||
l.printfDepth(s, logger, filter, 1, format, args...)
|
||||
}
|
||||
|
||||
func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) {
|
||||
func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) {
|
||||
buf, file, line := l.header(s, depth)
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
if logger != nil && logger.writeKlogBuffer == nil {
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
format, args = filter.FilterF(format, args)
|
||||
|
@ -739,13 +740,14 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter
|
|||
// printWithFileLine behaves like print but uses the provided file and line number. If
|
||||
// alsoLogToStderr is true, the log message always appears on standard error; it
|
||||
// will also appear in the log file unless --logtostderr is set.
|
||||
func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
|
||||
func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
|
||||
buf := l.formatHeader(s, file, line)
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
if logger != nil && logger.writeKlogBuffer == nil {
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
|
@ -758,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f
|
|||
}
|
||||
|
||||
// if loggr is specified, will call loggr.Error, otherwise output with logging module.
|
||||
func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
if filter != nil {
|
||||
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
|
||||
}
|
||||
|
@ -770,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept
|
|||
}
|
||||
|
||||
// if loggr is specified, will call loggr.Info, otherwise output with logging module.
|
||||
func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
if filter != nil {
|
||||
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
|
||||
}
|
||||
|
@ -785,7 +787,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s
// set log severity by s
func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
// Only create a new buffer if we don't have one cached.
b := l.bufferCache.GetBuffer()
b := buffer.GetBuffer()
// The message is always quoted, even if it contains line breaks.
// If developers want multi-line output, they should use a small, fixed
// message and put the multi-line output into a value.

@ -796,7 +798,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
serialize.KVListFormat(&b.Buffer, keysAndValues...)
l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
// Make the buffer available for reuse.
l.bufferCache.PutBuffer(b)
buffer.PutBuffer(b)
}

// redirectBuffer is used to set an alternate destination for the logs
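The printS hunk above switches from the per-logger l.bufferCache to the package-level buffer.GetBuffer/PutBuffer helpers. A minimal sketch of that pooling pattern, assuming a sync.Pool-backed buffer package (the names below are illustrative, not klog's actual internals):

package buffer

import (
	"bytes"
	"sync"
)

// Buffer wraps bytes.Buffer so a log line can be formatted before output.
type Buffer struct {
	bytes.Buffer
}

var pool = sync.Pool{New: func() interface{} { return new(Buffer) }}

// GetBuffer returns a reusable, reset buffer from the pool.
func GetBuffer() *Buffer {
	b := pool.Get().(*Buffer)
	b.Reset()
	return b
}

// PutBuffer returns a buffer to the pool for reuse.
func PutBuffer(b *Buffer) {
	pool.Put(b)
}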
@ -851,7 +853,7 @@ func LogToStderr(stderr bool) {
}

// output writes the data to the log files and releases the buffer.
func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
var isLocked = true
l.mu.Lock()
defer func() {

@ -867,13 +869,17 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
}
}
data := buf.Bytes()
if log != nil {
// TODO: set 'severity' and caller information as structured log info
// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
if s == severity.ErrorLog {
logging.logger.WithCallDepth(depth+3).Error(nil, string(data))
if logger != nil {
if logger.writeKlogBuffer != nil {
logger.writeKlogBuffer(data)
} else {
log.WithCallDepth(depth + 3).Info(string(data))
// TODO: set 'severity' and caller information as structured log info
// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
if s == severity.ErrorLog {
logger.WithCallDepth(depth+3).Error(nil, string(data))
} else {
logger.WithCallDepth(depth + 3).Info(string(data))
}
}
} else if l.toStderr {
os.Stderr.Write(data)

@ -948,7 +954,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
timeoutFlush(ExitFlushTimeout)
OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
}
l.bufferCache.PutBuffer(buf)
buffer.PutBuffer(buf)

if stats := severityStats[s]; stats != nil {
atomic.AddInt64(&stats.lines, 1)
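The rewritten output dispatches on logger.writeKlogBuffer: a sink that can accept a pre-formatted klog line gets the raw bytes, otherwise klog falls back to Error/Info with an adjusted call depth. A hedged sketch of how a caller could opt in, assuming the vendored klog exposes SetLoggerWithOptions and a WriteKlogBuffer option (verify the exact names against the vendored klog version):

package main

import (
	"os"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

func main() {
	sink := logr.Discard() // placeholder sink; a real backend would go here
	klog.SetLoggerWithOptions(sink, klog.WriteKlogBuffer(func(b []byte) {
		// Receives the fully formatted klog line, header included,
		// instead of klog re-routing it through sink.Info/Error.
		os.Stderr.Write(b)
	}))
	klog.InfoS("metrics adapter started", "component", "karmada-metrics-adapter")
}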
@ -1282,7 +1288,7 @@ func (l *loggingT) setV(pc uintptr) Level {
|
|||
// See the documentation of V for more information.
|
||||
type Verbose struct {
|
||||
enabled bool
|
||||
logr *logr.Logger
|
||||
logger *logWriter
|
||||
}
|
||||
|
||||
func newVerbose(level Level, b bool) Verbose {
|
||||
|
@ -1290,7 +1296,7 @@ func newVerbose(level Level, b bool) Verbose {
|
|||
return Verbose{b, nil}
|
||||
}
|
||||
v := logging.logger.V(int(level))
|
||||
return Verbose{b, &v}
|
||||
return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}}
|
||||
}
|
||||
|
||||
// V reports whether verbosity at the call site is at least the requested level.
|
||||
|
@ -1313,6 +1319,13 @@ func newVerbose(level Level, b bool) Verbose {
|
|||
// less than or equal to the value of the -vmodule pattern matching the source file
|
||||
// containing the call.
|
||||
func V(level Level) Verbose {
|
||||
return VDepth(1, level)
|
||||
}
|
||||
|
||||
// VDepth is a variant of V that accepts a number of stack frames that will be
|
||||
// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to
|
||||
// V().
|
||||
func VDepth(depth int, level Level) Verbose {
|
||||
// This function tries hard to be cheap unless there's work to do.
|
||||
// The fast path is two atomic loads and compares.
|
||||
|
||||
|
@ -1329,7 +1342,7 @@ func V(level Level) Verbose {
|
|||
// but if V logging is enabled we're slow anyway.
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
if runtime.Callers(2, logging.pcs[:]) == 0 {
|
||||
if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
|
||||
return newVerbose(level, false)
|
||||
}
|
||||
// runtime.Callers returns "return PCs", but we want
|
||||
|
@ -1357,7 +1370,7 @@ func (v Verbose) Enabled() bool {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) Info(args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.print(severity.InfoLog, v.logr, logging.filter, args...)
|
||||
logging.print(severity.InfoLog, v.logger, logging.filter, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1365,7 +1378,7 @@ func (v Verbose) Info(args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfoDepth(depth int, args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
|
||||
logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1373,7 +1386,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) Infoln(args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.println(severity.InfoLog, v.logr, logging.filter, args...)
|
||||
logging.println(severity.InfoLog, v.logger, logging.filter, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1381,7 +1394,7 @@ func (v Verbose) Infoln(args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
|
||||
logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1389,7 +1402,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) Infof(format string, args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...)
|
||||
logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1397,7 +1410,7 @@ func (v Verbose) Infof(format string, args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...)
|
||||
logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1405,7 +1418,7 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...)
|
||||
logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1419,14 +1432,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...)
|
||||
logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...)
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: Use ErrorS instead.
|
||||
func (v Verbose) Error(err error, msg string, args ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.errorS(err, v.logr, logging.filter, 0, msg, args...)
|
||||
logging.errorS(err, v.logger, logging.filter, 0, msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1434,7 +1447,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) {
|
|||
// See the documentation of V for usage.
|
||||
func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
|
||||
if v.enabled {
|
||||
logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...)
|
||||
logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
|
||||
}
|
||||
}
@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
l.callDepth += info.CallDepth
}

func (l klogger) Info(level int, msg string, kvList ...interface{}) {
func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
merged := serialize.MergeKVs(l.values, kvList)
if l.prefix != "" {
msg = l.prefix + ": " + msg
}
V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
// Skip this function.
VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
}

func (l klogger) Enabled(level int) bool {
return V(Level(level)).Enabled()
func (l *klogger) Enabled(level int) bool {
// Skip this function and logr.Logger.Info where Enabled is called.
return VDepth(l.callDepth+2, Level(level)).Enabled()
}

func (l klogger) Error(err error, msg string, kvList ...interface{}) {
func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
merged := serialize.MergeKVs(l.values, kvList)
if l.prefix != "" {
msg = l.prefix + ": " + msg
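The klogger methods above move to pointer receivers and route through the new VDepth so that -vmodule patterns are matched against the real call site rather than klogr's own frames. A short usage sketch, assuming klog.NewKlogr() as the constructor for this logr adapter:

package main

import (
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v and -vmodule
	logger := klog.NewKlogr()

	// Both calls funnel through klogger.Enabled/Info, which now use
	// VDepth(callDepth, ...), so the -vmodule lookup sees this file,
	// not klogr.go, when deciding whether the message is emitted.
	logger.V(4).Info("scheduling estimate", "cluster", "member1")
	if klog.V(4).Enabled() {
		klog.InfoS("same verbosity check on the direct klog path")
	}
}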
@ -327,6 +327,21 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com
return a.spec, nil
}

// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
for _, name := range names {
_, err := o.toSchema(name)
if err != nil {
return nil, err
}
}
return o.spec.Components.Schemas, nil
}
func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) {
commonParamsMap := make(map[interface{}]*spec3.Parameter, 0)
paramOpsCountByName := make(map[interface{}]int, 0)
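BuildOpenAPIDefinitionsForResources, added above, builds only the schema definitions for a list of fully-qualified type names rather than a whole spec. A hedged caller sketch; the GetDefinitions wiring and the type name are placeholders, not code from this PR:

// Hypothetical caller living in the same package as the new function.
func podSchemas() (map[string]*spec.Schema, error) {
	// generatedopenapi.GetOpenAPIDefinitions stands in for a project's
	// generated definitions function (an assumption for illustration).
	cfg := &common.Config{GetDefinitions: generatedopenapi.GetOpenAPIDefinitions}
	return BuildOpenAPIDefinitionsForResources(cfg, "k8s.io/api/core/v1.Pod")
}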
@ -246,38 +246,42 @@ var schemaTypeFormatMap = map[string]typeInfo{
|
|||
// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple
|
||||
// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation.
|
||||
// Example:
|
||||
// type Sample struct {
|
||||
// ...
|
||||
// // port of the server
|
||||
// port IntOrString
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// type Sample struct {
|
||||
// ...
|
||||
// // port of the server
|
||||
// port IntOrString
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// // IntOrString documentation...
|
||||
// type IntOrString { ... }
|
||||
//
|
||||
// Adding IntOrString to this function:
|
||||
// "port" : {
|
||||
// format: "string",
|
||||
// type: "int-or-string",
|
||||
// Description: "port of the server"
|
||||
// }
|
||||
//
|
||||
// "port" : {
|
||||
// format: "string",
|
||||
// type: "int-or-string",
|
||||
// Description: "port of the server"
|
||||
// }
|
||||
//
|
||||
// Implement OpenAPIDefinitionGetter for IntOrString:
|
||||
//
|
||||
// "port" : {
|
||||
// $Ref: "#/definitions/IntOrString"
|
||||
// Description: "port of the server"
|
||||
// }
|
||||
// "port" : {
|
||||
// $Ref: "#/definitions/IntOrString"
|
||||
// Description: "port of the server"
|
||||
// }
|
||||
//
|
||||
// ...
|
||||
// definitions:
|
||||
// {
|
||||
// "IntOrString": {
|
||||
// format: "string",
|
||||
// type: "int-or-string",
|
||||
// Description: "IntOrString documentation..." // new
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "IntOrString": {
|
||||
// format: "string",
|
||||
// type: "int-or-string",
|
||||
// Description: "IntOrString documentation..." // new
|
||||
// }
|
||||
// }
|
||||
func OpenAPITypeFormat(typeName string) (string, string) {
|
||||
mapped, ok := schemaTypeFormatMap[typeName]
|
||||
if !ok {
|
||||
|
|
|
@ -13,4 +13,3 @@ func AdaptWebServices(webServices []*restful.WebService) []common.RouteContainer
|
|||
}
|
||||
return containers
|
||||
}
|
||||
|
||||
@ -20,7 +20,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"sort"

@ -64,7 +63,7 @@ func (a apiViolationFile) VerifyFile(f *generator.File, path string) error {
path = a.unmangledPath

formatted := f.Body.Bytes()
existing, err := ioutil.ReadFile(path)
existing, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("unable to read file %q for comparison: %v", path, err)
}
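The api_linter hunk is a mechanical ioutil.ReadFile to os.ReadFile migration (ioutil has been deprecated since Go 1.16 and os.ReadFile is a drop-in replacement). A minimal sketch of the compare-against-disk pattern VerifyFile uses, with an illustrative path:

package apirulesketch

import (
	"bytes"
	"fmt"
	"os"
)

func verifyUnchanged(path string, formatted []byte) error {
	existing, err := os.ReadFile(path) // drop-in for the removed ioutil.ReadFile
	if err != nil {
		return fmt.Errorf("unable to read file %q for comparison: %v", path, err)
	}
	if !bytes.Equal(formatted, existing) {
		return fmt.Errorf("output for %q differs from what is on disk", path)
	}
	return nil
}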
@ -55,6 +55,10 @@ func newEnumContext(c *generator.Context) *enumContext {
// If the given type is a known enum type, returns the enumType, true
// Otherwise, returns nil, false
func (ec *enumContext) EnumType(t *types.Type) (enum *enumType, isEnum bool) {
// if t is a pointer, use its underlying type instead
if t.Kind == types.Pointer {
t = t.Elem
}
enum, ok := ec.enumTypes[t.Name]
return enum, ok
}
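EnumType now dereferences pointer types before the enum lookup, so optional (pointer) fields pick up enum documentation as well. A small illustration of the kind of API type this affects (hypothetical, not from this repository):

// Protocol is a string enum; its constants carry the value docs.
type Protocol string

const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
)

type PortSpec struct {
	// Before this change only the non-pointer field received the
	// "Possible enum values" section in the generated schema.
	Protocol Protocol `json:"protocol"`
	// With the pointer dereference, this optional field does as well.
	FallbackProtocol *Protocol `json:"fallbackProtocol,omitempty"`
}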
@ -74,9 +78,12 @@ func (et *enumType) ValueStrings() []string {
|
|||
// DescriptionLines returns a description of the enum in this format:
|
||||
//
|
||||
// Possible enum values:
|
||||
// - `"value1"` description 1
|
||||
// - `"value2"` description 2
|
||||
// - `"value1"` description 1
|
||||
// - `"value2"` description 2
|
||||
func (et *enumType) DescriptionLines() []string {
|
||||
if len(et.Values) == 0 {
|
||||
return nil
|
||||
}
|
||||
var lines []string
|
||||
for _, value := range et.Values {
|
||||
lines = append(lines, value.Description())
|
||||
|
@ -90,9 +97,9 @@ func parseEnums(c *generator.Context) enumMap {
|
|||
// First, find the builtin "string" type
|
||||
stringType := c.Universe.Type(types.Name{Name: "string"})
|
||||
|
||||
// find all enum types.
|
||||
enumTypes := make(enumMap)
|
||||
for _, p := range c.Universe {
|
||||
// find all enum types.
|
||||
for _, t := range p.Types {
|
||||
if isEnumType(stringType, t) {
|
||||
if _, ok := enumTypes[t.Name]; !ok {
|
||||
|
@ -102,7 +109,10 @@ func parseEnums(c *generator.Context) enumMap {
|
|||
}
|
||||
}
|
||||
}
|
||||
// find all enum values from constants, and try to match each with its type.
|
||||
}
|
||||
|
||||
// find all enum values from constants, and try to match each with its type.
|
||||
for _, p := range c.Universe {
|
||||
for _, c := range p.Constants {
|
||||
enumType := c.Underlying
|
||||
if _, ok := enumTypes[enumType.Name]; ok {
|
||||
|
@ -125,7 +135,7 @@ func (et *enumType) appendValue(value *enumValue) {
|
|||
|
||||
// Description returns the description line for the enumValue
|
||||
// with the format:
|
||||
// - `"FooValue"` is the Foo value
|
||||
// - `"FooValue"` is the Foo value
|
||||
func (ev *enumValue) Description() string {
|
||||
comment := strings.TrimSpace(ev.Comment)
|
||||
// The comment should starts with the type name, trim it first.
|
||||
|
|
|
@ -686,7 +686,7 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type)
|
|||
g.generateSimpleProperty(typeString, format)
|
||||
if enumType, isEnum := g.enumContext.EnumType(m.Type); isEnum {
|
||||
// original type is an enum, add "Enum: " and the values
|
||||
g.Do("Enum: []interface{}{$.$}", strings.Join(enumType.ValueStrings(), ", "))
|
||||
g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", "))
|
||||
}
|
||||
g.Do("},\n},\n", nil)
|
||||
return nil
|
||||
|
|
|
@ -56,24 +56,29 @@ Go field names must be CamelCase. JSON field names must be camelCase. Other than
|
|||
initial letter, the two should almost always match. No underscores nor dashes in either.
|
||||
This rule verifies the convention "Other than capitalization of the initial letter, the two should almost always match."
|
||||
Examples (also in unit test):
|
||||
Go name | JSON name | match
|
||||
podSpec false
|
||||
PodSpec podSpec true
|
||||
PodSpec PodSpec false
|
||||
podSpec podSpec false
|
||||
PodSpec spec false
|
||||
Spec podSpec false
|
||||
JSONSpec jsonSpec true
|
||||
JSONSpec jsonspec false
|
||||
HTTPJSONSpec httpJSONSpec true
|
||||
|
||||
Go name | JSON name | match
|
||||
podSpec false
|
||||
PodSpec podSpec true
|
||||
PodSpec PodSpec false
|
||||
podSpec podSpec false
|
||||
PodSpec spec false
|
||||
Spec podSpec false
|
||||
JSONSpec jsonSpec true
|
||||
JSONSpec jsonspec false
|
||||
HTTPJSONSpec httpJSONSpec true
|
||||
|
||||
NOTE: this validator cannot tell two sequential all-capital words from one word, therefore the case below
|
||||
is also considered matched.
|
||||
HTTPJSONSpec httpjsonSpec true
|
||||
|
||||
HTTPJSONSpec httpjsonSpec true
|
||||
|
||||
NOTE: JSON names in jsonNameBlacklist should skip evaluation
|
||||
true
|
||||
podSpec true
|
||||
podSpec - true
|
||||
podSpec metadata true
|
||||
|
||||
true
|
||||
podSpec true
|
||||
podSpec - true
|
||||
podSpec metadata true
|
||||
*/
|
||||
type NamesMatch struct{}
|
||||
|
||||
|
@ -114,14 +119,15 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
|
|||
|
||||
// namesMatch evaluates if goName and jsonName match the API rule
|
||||
// TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing
|
||||
// packages have been tried out:
|
||||
// github.com/markbates/inflect
|
||||
// github.com/segmentio/go-camelcase
|
||||
// github.com/iancoleman/strcase
|
||||
// github.com/fatih/camelcase
|
||||
// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details
|
||||
// about why they don't satisfy our need. What we need can be a function that detects an acronym at the
|
||||
// beginning of a string.
|
||||
//
|
||||
// packages have been tried out:
|
||||
// github.com/markbates/inflect
|
||||
// github.com/segmentio/go-camelcase
|
||||
// github.com/iancoleman/strcase
|
||||
// github.com/fatih/camelcase
|
||||
// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details
|
||||
// about why they don't satisfy our need. What we need can be a function that detects an acronym at the
|
||||
// beginning of a string.
|
||||
func namesMatch(goName, jsonName string) bool {
|
||||
if jsonNameBlacklist.Has(jsonName) {
|
||||
return true
|
||||
|
|
|
@ -18,11 +18,8 @@ package handler
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha512"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"mime"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -42,12 +39,9 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
jsonExt = ".json"
|
||||
|
||||
mimeJson = "application/json"
|
||||
// TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags.
|
||||
mimePb = "application/com.github.googleapis.gnostic.OpenAPIv2@68f4ded+protobuf"
|
||||
mimePbGz = "application/x-gzip"
|
||||
subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v2@v1.0+protobuf"
|
||||
subTypeProtobuf = "com.github.proto-openapi.spec.v2.v1.0+protobuf"
|
||||
subTypeJSON = "json"
|
||||
)
|
||||
|
||||
func computeETag(data []byte) string {
|
||||
|
@ -70,12 +64,6 @@ type OpenAPIService struct {
|
|||
etagCache handler.HandlerCache
|
||||
}
|
||||
|
||||
func init() {
|
||||
mime.AddExtensionType(".json", mimeJson)
|
||||
mime.AddExtensionType(".pb-v1", mimePb)
|
||||
mime.AddExtensionType(".gz", mimePbGz)
|
||||
}
|
||||
|
||||
// NewOpenAPIService builds an OpenAPIService starting with the given spec.
|
||||
func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) {
|
||||
o := &OpenAPIService{}
|
||||
|
@ -117,7 +105,7 @@ func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) {
|
|||
o.rwMutex.Lock()
|
||||
defer o.rwMutex.Unlock()
|
||||
o.jsonCache = o.jsonCache.New(func() ([]byte, error) {
|
||||
return json.Marshal(openapiSpec)
|
||||
return openapiSpec.MarshalJSON()
|
||||
})
|
||||
o.protoCache = o.protoCache.New(func() ([]byte, error) {
|
||||
json, err := o.jsonCache.Get()
|
||||
|
@ -146,14 +134,6 @@ func ToProtoBinary(json []byte) ([]byte, error) {
|
|||
return proto.Marshal(document)
|
||||
}
|
||||
|
||||
func toGzip(data []byte) []byte {
|
||||
var buf bytes.Buffer
|
||||
zw := gzip.NewWriter(&buf)
|
||||
zw.Write(data)
|
||||
zw.Close()
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
|
||||
//
|
||||
// Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead.
|
||||
@ -168,12 +148,14 @@ func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handl
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error {
accepted := []struct {
Type string
SubType string
GetDataAndETag func() ([]byte, string, time.Time, error)
Type string
SubType string
ReturnedContentType string
GetDataAndETag func() ([]byte, string, time.Time, error)
}{
{"application", "json", o.getSwaggerBytes},
{"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes},
{"application", subTypeJSON, "application/" + subTypeJSON, o.getSwaggerBytes},
{"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf, o.getSwaggerPbBytes},
{"application", subTypeProtobuf, "application/" + subTypeProtobuf, o.getSwaggerPbBytes},
}

handler.Handle(servePath, gziphandler.GzipHandler(http.HandlerFunc(

@ -192,7 +174,6 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
if clause.SubType != accepts.SubType && clause.SubType != "*" {
continue
}

// serve the first matching media type in the sorted clause list
data, etag, lastModified, err := accepts.GetDataAndETag()
if err != nil {

@ -203,6 +184,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
return
}
}
// Set Content-Type header in the reponse
w.Header().Set("Content-Type", accepts.ReturnedContentType)

// ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
w.Header().Set("Etag", strconv.Quote(etag))
// ServeContent will take care of caching using eTag.
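The handler change records a ReturnedContentType per accepted media type and writes it back on the response, so requests for the deprecated protobuf subtype are answered with the canonical one. A hedged client-side sketch, assuming the spec is served at the conventional /openapi/v2 path:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"https://karmada-apiserver.example/openapi/v2", nil) // hypothetical endpoint
	// Ask for the deprecated subtype; the server now replies with
	// Content-Type "application/com.github.proto-openapi.spec.v2.v1.0+protobuf".
	req.Header.Set("Accept", "application/com.github.proto-openapi.spec.v2@v1.0+protobuf")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Header.Get("Content-Type"), resp.Header.Get("Etag"))
}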
@ -21,7 +21,6 @@ import (
|
|||
"crypto/sha512"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
@ -41,15 +40,9 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
jsonExt = ".json"
|
||||
|
||||
mimeJson = "application/json"
|
||||
// TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags.
|
||||
mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf"
|
||||
mimePbGz = "application/x-gzip"
|
||||
|
||||
subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf"
|
||||
subTypeJSON = "json"
|
||||
subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf"
|
||||
subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf"
|
||||
subTypeJSON = "json"
|
||||
)
|
||||
|
||||
// OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3
|
||||
|
@ -84,12 +77,6 @@ type OpenAPIV3Group struct {
|
|||
etagCache handler.HandlerCache
|
||||
}
|
||||
|
||||
func init() {
|
||||
mime.AddExtensionType(".json", mimeJson)
|
||||
mime.AddExtensionType(".pb-v1", mimePb)
|
||||
mime.AddExtensionType(".gz", mimePbGz)
|
||||
}
|
||||
|
||||
func computeETag(data []byte) string {
|
||||
if data == nil {
|
||||
return ""
|
||||
|
@ -154,7 +141,7 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by
|
|||
}
|
||||
etagBytes, err := v.etagCache.Get()
|
||||
return specBytes, string(etagBytes), v.lastModified, err
|
||||
} else if getType == subTypeProtobuf {
|
||||
} else if getType == subTypeProtobuf || getType == subTypeProtobufDeprecated {
|
||||
specPb, err := v.pbCache.Get()
|
||||
if err != nil {
|
||||
return nil, "", v.lastModified, err
|
||||
|
@ -191,6 +178,8 @@ func ToV3ProtoBinary(json []byte) ([]byte, error) {
|
|||
|
||||
func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
data, _ := o.getGroupBytes()
|
||||
w.Header().Set("Etag", strconv.Quote(computeETag(data)))
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data))
|
||||
}
|
||||
|
||||
|
@ -210,11 +199,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque
|
|||
}
|
||||
|
||||
accepted := []struct {
|
||||
Type string
|
||||
SubType string
|
||||
Type string
|
||||
SubType string
|
||||
ReturnedContentType string
|
||||
}{
|
||||
{"application", subTypeJSON},
|
||||
{"application", subTypeProtobuf},
|
||||
{"application", subTypeJSON, "application/" + subTypeJSON},
|
||||
{"application", subTypeProtobuf, "application/" + subTypeProtobuf},
|
||||
{"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf},
|
||||
}
|
||||
|
||||
for _, clause := range clauses {
|
||||
|
@ -229,6 +220,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Set Content-Type header in the reponse
|
||||
w.Header().Set("Content-Type", accepts.ReturnedContentType)
|
||||
|
||||
// ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
|
||||
w.Header().Set("Etag", strconv.Quote(etag))
|
||||
|
@ -18,3 +18,7 @@ package internal

// Used by tests to selectively disable experimental JSON unmarshaler
var UseOptimizedJSONUnmarshaling bool = true
var UseOptimizedJSONUnmarshalingV3 bool = true

// Used by tests to selectively disable experimental JSON marshaler
var UseOptimizedJSONMarshaling bool = true
@ -0,0 +1,65 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
"github.com/go-openapi/jsonreference"
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

// DeterministicMarshal calls the jsonv2 library with the deterministic
// flag in order to have stable marshaling.
func DeterministicMarshal(in any) ([]byte, error) {
return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in)
}

// JSONRefFromMap populates a json reference object if the map v contains a $ref key.
func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error {
if v == nil {
return nil
}
if vv, ok := v["$ref"]; ok {
if str, ok := vv.(string); ok {
ref, err := jsonreference.New(str)
if err != nil {
return err
}
*jsonRef = ref
}
}
return nil
}

// SanitizeExtensions sanitizes the input map such that non extension
// keys (non x-*, X-*) keys are dropped from the map. Returns the new
// modified map, or nil if the map is now empty.
func SanitizeExtensions(e map[string]interface{}) map[string]interface{} {
for k := range e {
if !IsExtensionKey(k) {
delete(e, k)
}
}
if len(e) == 0 {
e = nil
}
return e
}

// IsExtensionKey returns true if the input string is of format x-* or X-*
func IsExtensionKey(k string) bool {
return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-'
}
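The new internal helpers above are small but load-bearing for the spec types: SanitizeExtensions drops non x-*/X-* keys in place, JSONRefFromMap recovers a $ref, and DeterministicMarshal produces byte-stable JSON. A sketch of their behavior, written as if it lived next to the helpers in the same internal package (values are illustrative; only fmt is imported in addition):

func sketchInternalHelpers() {
	ext := map[string]interface{}{
		"x-kubernetes-group-version-kind": map[string]string{"kind": "Cluster"},
		"X-Custom":                        true,
		"description":                     "dropped: not an extension key",
	}
	ext = SanitizeExtensions(ext) // keeps only the two x-*/X-* keys

	fmt.Println(IsExtensionKey("x-foo"), IsExtensionKey("foo")) // true false

	// Byte-stable output regardless of Go's randomized map iteration order.
	if b, err := DeterministicMarshal(map[string]int{"b": 2, "a": 1}); err == nil {
		fmt.Println(string(b)) // {"a":1,"b":2}
	}
}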
@ -34,6 +34,13 @@ type MarshalOptions struct {
// unknown JSON object members.
DiscardUnknownMembers bool

// Deterministic specifies that the same input value will be serialized
// as the exact same output bytes. Different processes of
// the same program will serialize equal values to the same bytes,
// but different versions of the same program are not guaranteed
// to produce the exact same sequence of bytes.
Deterministic bool

// formatDepth is the depth at which we respect the format flag.
formatDepth int
// format is custom formatting for the value at the specified depth.

@ -62,7 +62,7 @@ func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) {
}
return dec.stringCache.make(val), nil
case '0':
fv, _ := parseFloat(val, 64) // ignore error since readValue gaurantees val is valid
fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid
return fv, nil
default:
panic("BUG: invalid kind: " + k.String())
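The Deterministic flag added to MarshalOptions is what the map and object marshalers in the later hunks consult: with it set (and more than one entry present), keys are collected and sorted before being emitted instead of following Go's randomized map iteration. A sketch against the vendored experimental API; the import path below is internal to kube-openapi, so it only compiles from within that module and is shown purely for illustration:

package main

import (
	"fmt"

	jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	in := map[string]string{"zone": "eu", "cluster": "member1", "api": "v1alpha1"}

	// Key order may vary from run to run without the flag...
	loose, _ := jsonv2.MarshalOptions{}.Marshal(jsonv2.EncodeOptions{}, in)
	// ...and is sorted (per the map arshaler hunks below) with it.
	stable, _ := jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in)

	fmt.Println(string(loose))
	fmt.Println(string(stable)) // {"api":"v1alpha1","cluster":"member1","zone":"eu"}
}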
@ -99,13 +99,32 @@ func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error
|
|||
if !enc.options.AllowInvalidUTF8 {
|
||||
enc.tokens.last.disableNamespace()
|
||||
}
|
||||
for name, val := range obj {
|
||||
if err := enc.WriteToken(String(name)); err != nil {
|
||||
return err
|
||||
if !mo.Deterministic || len(obj) <= 1 {
|
||||
for name, val := range obj {
|
||||
if err := enc.WriteToken(String(name)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := marshalValueAny(mo, enc, val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := marshalValueAny(mo, enc, val); err != nil {
|
||||
return err
|
||||
} else {
|
||||
names := getStrings(len(obj))
|
||||
var i int
|
||||
for name := range obj {
|
||||
(*names)[i] = name
|
||||
i++
|
||||
}
|
||||
names.Sort()
|
||||
for _, name := range *names {
|
||||
if err := enc.WriteToken(String(name)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := marshalValueAny(mo, enc, obj[name]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
putStrings(names)
|
||||
}
|
||||
if err := enc.WriteToken(ObjectEnd); err != nil {
|
||||
return err
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
|
@ -12,6 +13,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
@ -228,13 +230,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
|
|||
}
|
||||
}
|
||||
val := enc.UnusedBuffer()
|
||||
var b []byte
|
||||
if va.Kind() == reflect.Array {
|
||||
// TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice.
|
||||
b = va.Slice(0, va.Len()).Bytes()
|
||||
} else {
|
||||
b = va.Bytes()
|
||||
}
|
||||
b := va.Bytes()
|
||||
n := len(`"`) + encodedLen(len(b)) + len(`"`)
|
||||
if cap(val) < n {
|
||||
val = make([]byte, n)
|
||||
|
@ -248,19 +244,19 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
|
|||
}
|
||||
unmarshalDefault := fncs.unmarshal
|
||||
fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error {
|
||||
decode, decodedLen := decodeBase64, decodedLenBase64
|
||||
decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64
|
||||
if uo.format != "" && uo.formatDepth == dec.tokens.depth() {
|
||||
switch uo.format {
|
||||
case "base64":
|
||||
decode, decodedLen = decodeBase64, decodedLenBase64
|
||||
decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64
|
||||
case "base64url":
|
||||
decode, decodedLen = decodeBase64URL, decodedLenBase64URL
|
||||
decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL
|
||||
case "base32":
|
||||
decode, decodedLen = decodeBase32, decodedLenBase32
|
||||
decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32
|
||||
case "base32hex":
|
||||
decode, decodedLen = decodeBase32Hex, decodedLenBase32Hex
|
||||
decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex
|
||||
case "base16", "hex":
|
||||
decode, decodedLen = decodeBase16, decodedLenBase16
|
||||
decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16
|
||||
case "array":
|
||||
uo.format = ""
|
||||
return unmarshalDefault(uo, dec, va)
|
||||
|
@ -290,23 +286,28 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
|
|||
n--
|
||||
}
|
||||
n = decodedLen(n)
|
||||
var b []byte
|
||||
b := va.Bytes()
|
||||
if va.Kind() == reflect.Array {
|
||||
// TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice.
|
||||
b = va.Slice(0, va.Len()).Bytes()
|
||||
if n != len(b) {
|
||||
err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b))
|
||||
return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err}
|
||||
}
|
||||
} else {
|
||||
b = va.Bytes()
|
||||
if b == nil || cap(b) < n {
|
||||
b = make([]byte, n)
|
||||
} else {
|
||||
b = b[:n]
|
||||
}
|
||||
}
|
||||
if _, err := decode(b, val); err != nil {
|
||||
n2, err := decode(b, val)
|
||||
if err == nil && len(val) != encodedLen(n2) {
|
||||
// TODO(https://go.dev/issue/53845): RFC 4648, section 3.3,
|
||||
// specifies that non-alphabet characters must be rejected.
|
||||
// Unfortunately, the "base32" and "base64" packages allow
|
||||
// '\r' and '\n' characters by default.
|
||||
err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n")))
|
||||
}
|
||||
if err != nil {
|
||||
return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err}
|
||||
}
|
||||
if va.Kind() == reflect.Slice {
|
||||
|
@ -412,7 +413,7 @@ func makeUintArshaler(t reflect.Type) *arshaler {
|
|||
return nil
|
||||
}
|
||||
|
||||
x := math.Float64frombits(uint64(va.Uint()))
|
||||
x := math.Float64frombits(va.Uint())
|
||||
return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers)
|
||||
}
|
||||
fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error {
|
||||
|
@ -450,7 +451,7 @@ func makeUintArshaler(t reflect.Type) *arshaler {
|
|||
err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange)
|
||||
return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err}
|
||||
}
|
||||
va.SetUint(uint64(n))
|
||||
va.SetUint(n)
|
||||
return nil
|
||||
}
|
||||
return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t}
|
||||
|
@ -549,23 +550,9 @@ func makeFloatArshaler(t reflect.Type) *arshaler {
|
|||
return &fncs
|
||||
}
|
||||
|
||||
var mapIterPool = sync.Pool{
|
||||
New: func() any { return new(reflect.MapIter) },
|
||||
}
|
||||
|
||||
func getMapIter(mv reflect.Value) *reflect.MapIter {
|
||||
iter := mapIterPool.Get().(*reflect.MapIter)
|
||||
iter.Reset(mv)
|
||||
return iter
|
||||
}
|
||||
func putMapIter(iter *reflect.MapIter) {
|
||||
iter.Reset(reflect.Value{}) // allow underlying map to be garbage collected
|
||||
mapIterPool.Put(iter)
|
||||
}
|
||||
|
||||
func makeMapArshaler(t reflect.Type) *arshaler {
|
||||
// NOTE: The logic below disables namespaces for tracking duplicate names
|
||||
// when handling map keys with a unique represention.
|
||||
// when handling map keys with a unique representation.
|
||||
|
||||
// NOTE: Values retrieved from a map are not addressable,
|
||||
// so we shallow copy the values to make them addressable and
|
||||
|
@ -641,24 +628,76 @@ func makeMapArshaler(t reflect.Type) *arshaler {
|
|||
enc.tokens.last.disableNamespace()
|
||||
}
|
||||
|
||||
// NOTE: Map entries are serialized in a non-deterministic order.
|
||||
// Users that need stable output should call RawValue.Canonicalize.
|
||||
// TODO(go1.19): Remove use of a sync.Pool with reflect.MapIter.
|
||||
// Calling reflect.Value.MapRange no longer allocates.
|
||||
// See https://go.dev/cl/400675.
|
||||
iter := getMapIter(va.Value)
|
||||
defer putMapIter(iter)
|
||||
for iter.Next() {
|
||||
k.SetIterKey(iter)
|
||||
if err := marshalKey(mko, enc, k); err != nil {
|
||||
// TODO: If err is errMissingName, then wrap it as a
|
||||
// SemanticError since this key type cannot be serialized
|
||||
// as a JSON string.
|
||||
return err
|
||||
switch {
|
||||
case !mo.Deterministic || n <= 1:
|
||||
for iter := va.Value.MapRange(); iter.Next(); {
|
||||
k.SetIterKey(iter)
|
||||
if err := marshalKey(mko, enc, k); err != nil {
|
||||
// TODO: If err is errMissingName, then wrap it as a
|
||||
// SemanticError since this key type cannot be serialized
|
||||
// as a JSON string.
|
||||
return err
|
||||
}
|
||||
v.SetIterValue(iter)
|
||||
if err := marshalVal(mo, enc, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.SetIterValue(iter)
|
||||
if err := marshalVal(mo, enc, v); err != nil {
|
||||
return err
|
||||
case !nonDefaultKey && t.Key().Kind() == reflect.String:
|
||||
names := getStrings(n)
|
||||
for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ {
|
||||
k.SetIterKey(iter)
|
||||
(*names)[i] = k.String()
|
||||
}
|
||||
names.Sort()
|
||||
for _, name := range *names {
|
||||
if err := enc.WriteToken(String(name)); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf.
|
||||
k.SetString(name)
|
||||
v.Set(va.MapIndex(k.Value))
|
||||
if err := marshalVal(mo, enc, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
putStrings(names)
|
||||
default:
|
||||
type member struct {
|
||||
name string // unquoted name
|
||||
key addressableValue
|
||||
}
|
||||
members := make([]member, n)
|
||||
keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n)
|
||||
for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ {
|
||||
// Marshal the member name.
|
||||
k := addressableValue{keys.Index(i)} // indexed slice element is always addressable
|
||||
k.SetIterKey(iter)
|
||||
if err := marshalKey(mko, enc, k); err != nil {
|
||||
// TODO: If err is errMissingName, then wrap it as a
|
||||
// SemanticError since this key type cannot be serialized
|
||||
// as a JSON string.
|
||||
return err
|
||||
}
|
||||
name := enc.unwriteOnlyObjectMemberName()
|
||||
members[i] = member{name, k}
|
||||
}
|
||||
// TODO: If AllowDuplicateNames is enabled, then sort according
|
||||
// to reflect.Value as well if the names are equal.
|
||||
// See internal/fmtsort.
|
||||
// TODO(https://go.dev/issue/47619): Use slices.SortFunc instead.
|
||||
sort.Slice(members, func(i, j int) bool {
|
||||
return lessUTF16(members[i].name, members[j].name)
|
||||
})
|
||||
for _, member := range members {
|
||||
if err := enc.WriteToken(String(member.name)); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf.
|
||||
v.Set(va.MapIndex(member.key.Value))
|
||||
if err := marshalVal(mo, enc, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -856,7 +895,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
|
|||
// 2. The object namespace is guaranteed to be disabled.
|
||||
// 3. The object name is guaranteed to be valid and pre-escaped.
|
||||
// 4. There is no need to flush the buffer (for unwrite purposes).
|
||||
// 5. There is no possibility of an error occuring.
|
||||
// 5. There is no possibility of an error occurring.
|
||||
if optimizeCommon {
|
||||
// Append any delimiters or optional whitespace.
|
||||
if enc.tokens.last.length() > 0 {
|
||||
|
@ -996,7 +1035,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
|
|||
|
||||
if fields.inlinedFallback == nil {
|
||||
// Skip unknown value since we have no place to store it.
|
||||
if err := dec.skipValue(); err != nil {
|
||||
if err := dec.SkipValue(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
@ -89,35 +90,61 @@ func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableVa
|
|||
}
|
||||
return nil
|
||||
} else {
|
||||
if v.Len() == 0 {
|
||||
m := v // must be a map[string]V
|
||||
n := m.Len()
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
m := v
|
||||
mk := newAddressableValue(stringType)
|
||||
mv := newAddressableValue(m.Type().Elem())
|
||||
for iter := m.MapRange(); iter.Next(); {
|
||||
b, err := appendString(enc.UnusedBuffer(), iter.Key().String(), !enc.options.AllowInvalidUTF8, nil)
|
||||
marshalKey := func(mk addressableValue) error {
|
||||
b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if insertUnquotedName != nil {
|
||||
isVerbatim := consumeSimpleString(b) == len(b)
|
||||
isVerbatim := bytes.IndexByte(b, '\\') < 0
|
||||
name := unescapeStringMayCopy(b, isVerbatim)
|
||||
if !insertUnquotedName(name) {
|
||||
return &SyntacticError{str: "duplicate name " + string(b) + " in object"}
|
||||
}
|
||||
}
|
||||
if err := enc.WriteValue(b); err != nil {
|
||||
return err
|
||||
return enc.WriteValue(b)
|
||||
}
|
||||
marshalVal := f.fncs.marshal
|
||||
if mo.Marshalers != nil {
|
||||
marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type())
|
||||
}
|
||||
if !mo.Deterministic || n <= 1 {
|
||||
for iter := m.MapRange(); iter.Next(); {
|
||||
mk.SetIterKey(iter)
|
||||
if err := marshalKey(mk); err != nil {
|
||||
return err
|
||||
}
|
||||
mv.Set(iter.Value())
|
||||
if err := marshalVal(mo, enc, mv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
mv.Set(iter.Value())
|
||||
marshal := f.fncs.marshal
|
||||
if mo.Marshalers != nil {
|
||||
marshal, _ = mo.Marshalers.lookup(marshal, mv.Type())
|
||||
} else {
|
||||
names := getStrings(n)
|
||||
for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ {
|
||||
mk.SetIterKey(iter)
|
||||
(*names)[i] = mk.String()
|
||||
}
|
||||
if err := marshal(mo, enc, mv); err != nil {
|
||||
return err
|
||||
names.Sort()
|
||||
for _, name := range *names {
|
||||
mk.SetString(name)
|
||||
if err := marshalKey(mk); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf.
|
||||
mv.Set(m.MapIndex(mk.Value))
|
||||
if err := marshalVal(mo, enc, mv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
putStrings(names)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -162,7 +189,7 @@ func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressa
|
|||
} else {
|
||||
name := string(unquotedName) // TODO: Intern this?
|
||||
|
||||
m := v
|
||||
m := v // must be a map[string]V
|
||||
if m.IsNil() {
|
||||
m.Set(reflect.MakeMap(m.Type()))
|
||||
}
|
||||
|
|
|
@ -21,8 +21,8 @@ var (
|
|||
)
|
||||
|
||||
// MarshalerV1 is implemented by types that can marshal themselves.
|
||||
// It is recommended that types implement MarshalerV2 unless
|
||||
// the implementation is trying to avoid a hard dependency on this package.
|
||||
// It is recommended that types implement MarshalerV2 unless the implementation
|
||||
// is trying to avoid a hard dependency on the "jsontext" package.
|
||||
//
|
||||
// It is recommended that implementations return a buffer that is safe
|
||||
// for the caller to retain and potentially mutate.
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
@ -85,25 +86,39 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
|
|||
fncs.nonDefault = true
|
||||
fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error {
|
||||
format := time.RFC3339Nano
|
||||
isRFC3339 := true
|
||||
if mo.format != "" && mo.formatDepth == enc.tokens.depth() {
|
||||
var err error
|
||||
format, err = checkTimeFormat(mo.format)
|
||||
format, isRFC3339, err = checkTimeFormat(mo.format)
|
||||
if err != nil {
|
||||
return &SemanticError{action: "marshal", GoType: t, Err: err}
|
||||
}
|
||||
}
|
||||
|
||||
tt := va.Interface().(time.Time)
|
||||
if y := tt.Year(); y < 0 || y >= 10000 {
|
||||
// RFC 3339 is clear that years are 4 digits exactly.
|
||||
// See https://go.dev/issue/4556#c15 for more discussion.
|
||||
err := fmt.Errorf("year %d outside of range [0,9999]", y)
|
||||
return &SemanticError{action: "marshal", GoType: t, Err: err}
|
||||
}
|
||||
b := enc.UnusedBuffer()
|
||||
b = append(b, '"')
|
||||
b = tt.AppendFormat(b, format)
|
||||
b = append(b, '"')
|
||||
if isRFC3339 {
|
||||
// Not all Go timestamps can be represented as valid RFC 3339.
|
||||
// Explicitly check for these edge cases.
|
||||
// See https://go.dev/issue/4556 and https://go.dev/issue/54580.
|
||||
var err error
|
||||
switch b := b[len(`"`) : len(b)-len(`"`)]; {
|
||||
case b[len("9999")] != '-': // year must be exactly 4 digits wide
|
||||
err = errors.New("year outside of range [0,9999]")
|
||||
case b[len(b)-1] != 'Z':
|
||||
c := b[len(b)-len("Z07:00")]
|
||||
if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 {
|
||||
err = errors.New("timezone hour outside of range [0,23]")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return &SemanticError{action: "marshal", GoType: t, Err: err}
|
||||
}
|
||||
return enc.WriteValue(b) // RFC 3339 never needs JSON escaping
|
||||
}
|
||||
// The format may contain special characters that need escaping.
|
||||
// Verify that the result is a valid JSON string (common case),
|
||||
// otherwise escape the string correctly (slower case).
|
||||
|
@ -113,10 +128,11 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
|
|||
return enc.WriteValue(b)
|
||||
}
|
||||
fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error {
|
||||
format := time.RFC3339Nano
|
||||
format := time.RFC3339
|
||||
isRFC3339 := true
|
||||
if uo.format != "" && uo.formatDepth == dec.tokens.depth() {
|
||||
var err error
|
||||
format, err = checkTimeFormat(uo.format)
|
||||
format, isRFC3339, err = checkTimeFormat(uo.format)
|
||||
if err != nil {
|
||||
return &SemanticError{action: "unmarshal", GoType: t, Err: err}
|
||||
}
|
||||
|
@ -136,6 +152,29 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
|
|||
case '"':
|
||||
val = unescapeStringMayCopy(val, flags.isVerbatim())
|
||||
tt2, err := time.Parse(format, string(val))
|
||||
if isRFC3339 && err == nil {
|
||||
// TODO(https://go.dev/issue/54580): RFC 3339 specifies
|
||||
// the exact grammar of a valid timestamp. However,
|
||||
// the parsing functionality in "time" is too loose and
|
||||
// incorrectly accepts invalid timestamps as valid.
|
||||
// Remove these manual checks when "time" checks it for us.
|
||||
newParseError := func(layout, value, layoutElem, valueElem, message string) error {
|
||||
return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message}
|
||||
}
|
||||
switch {
|
||||
case val[len("2006-01-02T")+1] == ':': // hour must be two digits
|
||||
err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "")
|
||||
case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period
|
||||
err = newParseError(format, string(val), ".", ",", "")
|
||||
case val[len(val)-1] != 'Z':
|
||||
switch {
|
||||
case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range
|
||||
err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range")
|
||||
case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range
|
||||
err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range")
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err}
|
||||
}
|
||||
|
@ -149,48 +188,54 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
|
|||
return fncs
|
||||
}
|
||||
|
||||
func checkTimeFormat(format string) (string, error) {
|
||||
func checkTimeFormat(format string) (string, bool, error) {
|
||||
// We assume that an exported constant in the time package will
|
||||
// always start with an uppercase ASCII letter.
|
||||
if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' {
|
||||
switch format {
|
||||
case "ANSIC":
|
||||
return time.ANSIC, nil
|
||||
return time.ANSIC, false, nil
|
||||
case "UnixDate":
|
||||
return time.UnixDate, nil
|
||||
return time.UnixDate, false, nil
|
||||
case "RubyDate":
|
||||
return time.RubyDate, nil
|
||||
return time.RubyDate, false, nil
|
||||
case "RFC822":
|
||||
return time.RFC822, nil
|
||||
return time.RFC822, false, nil
|
||||
case "RFC822Z":
|
||||
return time.RFC822Z, nil
|
||||
return time.RFC822Z, false, nil
|
||||
case "RFC850":
|
||||
return time.RFC850, nil
|
||||
return time.RFC850, false, nil
|
||||
case "RFC1123":
|
||||
return time.RFC1123, nil
|
||||
return time.RFC1123, false, nil
|
||||
case "RFC1123Z":
|
||||
return time.RFC1123Z, nil
|
||||
return time.RFC1123Z, false, nil
|
||||
case "RFC3339":
|
||||
return time.RFC3339, nil
|
||||
return time.RFC3339, true, nil
|
||||
case "RFC3339Nano":
|
||||
return time.RFC3339Nano, nil
|
||||
return time.RFC3339Nano, true, nil
|
||||
case "Kitchen":
|
||||
return time.Kitchen, nil
|
||||
return time.Kitchen, false, nil
|
||||
case "Stamp":
|
||||
return time.Stamp, nil
|
||||
return time.Stamp, false, nil
|
||||
case "StampMilli":
|
||||
return time.StampMilli, nil
|
||||
return time.StampMilli, false, nil
|
||||
case "StampMicro":
|
||||
return time.StampMicro, nil
|
||||
return time.StampMicro, false, nil
|
||||
case "StampNano":
|
||||
return time.StampNano, nil
|
||||
return time.StampNano, false, nil
|
||||
default:
|
||||
// Reject any format that is an exported Go identifier in case
|
||||
// new format constants are added to the time package.
|
||||
if strings.TrimFunc(format, isLetterOrDigit) == "" {
|
||||
return "", fmt.Errorf("undefined format layout: %v", format)
|
||||
return "", false, fmt.Errorf("undefined format layout: %v", format)
|
||||
}
}
}
return format, nil
return format, false, nil
}

// parseDec2 parses b as an unsigned, base-10, 2-digit number.
// It panics if len(b) < 2. The result is undefined if digits are not base-10.
func parseDec2(b []byte) byte {
return 10*(b[0]-'0') + (b[1] - '0')
}
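parseDec2 is a tiny helper that reads the first two bytes as ASCII digits: for "23" it computes 10*('2'-'0') + ('3'-'0') = 23, which the RFC 3339 checks earlier in this file use to range-check timezone hours and minutes. A worked example (a sketch, not part of the vendored file):

package main

import "fmt"

func parseDec2(b []byte) byte {
	return 10*(b[0]-'0') + (b[1] - '0')
}

func main() {
	fmt.Println(parseDec2([]byte("07"))) // 7
	fmt.Println(parseDec2([]byte("23"))) // 23
	fmt.Println(parseDec2([]byte("60"))) // 60; callers reject >= 60 minutes and >= 24 hours
}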
12
vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go
generated
vendored
12
vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go
generated
vendored
|
@ -347,9 +347,9 @@ func (d *Decoder) PeekKind() Kind {
|
|||
return next
|
||||
}
|
||||
|
||||
// skipValue is semantically equivalent to calling ReadValue and discarding
|
||||
// SkipValue is semantically equivalent to calling ReadValue and discarding
|
||||
// the result except that memory is not wasted trying to hold the entire result.
|
||||
func (d *Decoder) skipValue() error {
|
||||
func (d *Decoder) SkipValue() error {
|
||||
switch d.PeekKind() {
|
||||
case '{', '[':
|
||||
// For JSON objects and arrays, keep skipping all tokens
|
||||
|
@ -374,7 +374,7 @@ func (d *Decoder) skipValue() error {
|
|||
}
|
||||
|
||||
// ReadToken reads the next Token, advancing the read offset.
|
||||
// The returned token is only valid until the next Peek or Read call.
|
||||
// The returned token is only valid until the next Peek, Read, or Skip call.
|
||||
// It returns io.EOF if there are no more tokens.
|
||||
func (d *Decoder) ReadToken() (Token, error) {
|
||||
// Determine the next kind.
|
||||
|
@ -585,7 +585,7 @@ func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 }
|
|||
|
||||
// ReadValue returns the next raw JSON value, advancing the read offset.
|
||||
// The value is stripped of any leading or trailing whitespace.
|
||||
// The returned value is only valid until the next Peek or Read call and
|
||||
// The returned value is only valid until the next Peek, Read, or Skip call and
|
||||
// may not be mutated while the Decoder remains in use.
|
||||
// If the decoder is currently at the end token for an object or array,
|
||||
// then it reports a SyntacticError and the internal state remains unchanged.
|
||||
|
@ -1013,7 +1013,7 @@ func (d *Decoder) InputOffset() int64 {
|
|||
// UnreadBuffer returns the data remaining in the unread buffer,
|
||||
// which may contain zero or more bytes.
|
||||
// The returned buffer must not be mutated while Decoder continues to be used.
|
||||
// The buffer contents are valid until the next Peek or Read call.
|
||||
// The buffer contents are valid until the next Peek, Read, or Skip call.
|
||||
func (d *Decoder) UnreadBuffer() []byte {
|
||||
return d.unreadBuffer()
|
||||
}
|
||||
|
@ -1213,7 +1213,7 @@ func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, valid
|
|||
return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"}
|
||||
}
|
||||
// Only certain control characters can use the \uFFFF notation
|
||||
// for canonical formating (per RFC 8785, section 3.2.2.2.).
|
||||
// for canonical formatting (per RFC 8785, section 3.2.2.2.).
|
||||
switch v1 {
|
||||
// \uFFFF notation not permitted for these characters.
|
||||
case '\b', '\f', '\n', '\r', '\t':
|
||||
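The decode.go hunks above export skipValue as SkipValue and update the doc comments to mention Skip alongside Peek and Read. Below is a minimal sketch of how a caller might combine PeekKind, ReadToken and the newly exported SkipValue to stream past members it does not need; it assumes the package's NewDecoder constructor, and the import path is purely illustrative since the package is internal to kube-openapi:

package main

import (
	"fmt"
	"strings"

	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	d := json.NewDecoder(strings.NewReader(`{"keep": 1, "skip": [1, 2, 3]}`))
	if _, err := d.ReadToken(); err != nil { // consume the opening '{'
		panic(err)
	}
	for d.PeekKind() == '"' { // stop at the closing '}'
		name, err := d.ReadToken() // member name
		if err != nil {
			panic(err)
		}
		if name.String() == "skip" {
			// SkipValue discards the member value without buffering it.
			if err := d.SkipValue(); err != nil {
				panic(err)
			}
			continue
		}
		val, err := d.ReadValue()
		if err != nil {
			panic(err)
		}
		fmt.Println(name.String(), string(val)) // keep 1
	}
}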
9 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go (generated, vendored)
|
@ -8,8 +8,7 @@
|
|||
// primitive data types such as booleans, strings, and numbers,
|
||||
// in addition to structured data types such as objects and arrays.
|
||||
//
|
||||
//
|
||||
// Terminology
|
||||
// # Terminology
|
||||
//
|
||||
// This package uses the terms "encode" and "decode" for syntactic functionality
|
||||
// that is concerned with processing JSON based on its grammar, and
|
||||
|
@ -32,8 +31,7 @@
|
|||
//
|
||||
// See RFC 8259 for more information.
|
||||
//
|
||||
//
|
||||
// Specifications
|
||||
// # Specifications
|
||||
//
|
||||
// Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259,
|
||||
// and RFC 8785. Each RFC is generally a stricter subset of another RFC.
|
||||
|
@ -60,8 +58,7 @@
|
|||
// In particular, it makes specific choices about behavior that RFC 8259
|
||||
// leaves as undefined in order to ensure greater interoperability.
|
||||
//
|
||||
//
|
||||
// JSON Representation of Go structs
|
||||
// # JSON Representation of Go structs
|
||||
//
|
||||
// A Go struct is naturally represented as a JSON object,
|
||||
// where each Go struct field corresponds with a JSON object member.
|
||||
|
24 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go (generated, vendored)
|
@ -347,6 +347,30 @@ func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// unwriteOnlyObjectMemberName unwrites the only object member name
|
||||
// and returns the unquoted name.
|
||||
func (e *Encoder) unwriteOnlyObjectMemberName() string {
|
||||
if last := e.tokens.last; !last.isObject() || last.length() != 1 {
|
||||
panic("BUG: must be called on an object after writing first name")
|
||||
}
|
||||
|
||||
// Unwrite the name and whitespace.
|
||||
b := trimSuffixString(e.buf)
|
||||
isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0
|
||||
name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim))
|
||||
e.buf = trimSuffixWhitespace(b)
|
||||
|
||||
// Undo state changes.
|
||||
e.tokens.last.decrement()
|
||||
if !e.options.AllowDuplicateNames {
|
||||
if e.tokens.last.isActiveNamespace() {
|
||||
e.namespaces.last().removeLast()
|
||||
}
|
||||
e.names.clearLast()
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func trimSuffixWhitespace(b []byte) []byte {
|
||||
// NOTE: The arguments and logic are kept simple to keep this inlineable.
|
||||
n := len(b) - 1
|
||||
|
32 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go (generated, vendored)
|
@ -8,6 +8,7 @@ import (
|
|||
"bytes"
|
||||
"io"
|
||||
"math/bits"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
|
@ -148,3 +149,34 @@ func putStreamingDecoder(d *Decoder) {
|
|||
streamingDecoderPool.Put(d)
|
||||
}
|
||||
}
|
||||
|
||||
var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }}
|
||||
|
||||
type stringSlice []string
|
||||
|
||||
// getStrings returns a non-nil pointer to a slice with length n.
|
||||
func getStrings(n int) *stringSlice {
|
||||
s := stringsPools.Get().(*stringSlice)
|
||||
if cap(*s) < n {
|
||||
*s = make([]string, n)
|
||||
}
|
||||
*s = (*s)[:n]
|
||||
return s
|
||||
}
|
||||
|
||||
func putStrings(s *stringSlice) {
|
||||
if cap(*s) > 1<<10 {
|
||||
*s = nil // avoid pinning arbitrarily large amounts of memory
|
||||
}
|
||||
stringsPools.Put(s)
|
||||
}
|
||||
|
||||
// Sort sorts the string slice according to RFC 8785, section 3.2.3.
|
||||
func (ss *stringSlice) Sort() {
|
||||
// TODO(https://go.dev/issue/47619): Use slices.SortFunc instead.
|
||||
sort.Sort(ss)
|
||||
}
|
||||
|
||||
func (ss *stringSlice) Len() int { return len(*ss) }
|
||||
func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) }
|
||||
func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] }
|
||||
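The pooled stringSlice added here gives the encoder a reusable scratch buffer of member names ordered per RFC 8785. A small in-package sketch of how the unexported helpers fit together (the values are invented):

names := getStrings(3)
defer putStrings(names)
(*names)[0], (*names)[1], (*names)[2] = "b", "a", "\uFF21" // U+FF21, fullwidth 'A'
names.Sort()
// *names is now ["a", "b", "\uFF21"]: ASCII sorts as expected under UTF-16
// order, and U+FF21 (code unit 0xFF21) comes after both.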
4 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go (generated, vendored)
|
@ -721,7 +721,7 @@ func (s *uintSet) has(i uint) bool {
|
|||
return s.lo.has(i)
|
||||
} else {
|
||||
i -= 64
|
||||
iHi, iLo := int(i/64), uint(i%64)
|
||||
iHi, iLo := int(i/64), i%64
|
||||
return iHi < len(s.hi) && s.hi[iHi].has(iLo)
|
||||
}
|
||||
}
|
||||
|
@ -735,7 +735,7 @@ func (s *uintSet) insert(i uint) bool {
|
|||
return !has
|
||||
} else {
|
||||
i -= 64
|
||||
iHi, iLo := int(i/64), uint(i%64)
|
||||
iHi, iLo := int(i/64), i%64
|
||||
if iHi >= len(s.hi) {
|
||||
s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...)
|
||||
s.hi = s.hi[:cap(s.hi)]
|
||||
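Both hunks in state.go only drop a uint conversion that was already a no-op; the index math is unchanged. As a worked example of that math: has(130) sees 130 >= 64, subtracts 64 to get i = 66, then computes iHi = 66/64 = 1 and iLo = 66%64 = 2, so the lookup lands on bit 2 of s.hi[1] (values 0 through 63 live in the separate lo word).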
10 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go (generated, vendored)
|
@ -112,7 +112,7 @@ func Bool(b bool) Token {
|
|||
return False
|
||||
}
|
||||
|
||||
// String construct a Token representing a JSON string.
|
||||
// String constructs a Token representing a JSON string.
|
||||
// The provided string should contain valid UTF-8, otherwise invalid characters
|
||||
// may be mangled as the Unicode replacement character.
|
||||
func String(s string) Token {
|
||||
|
@ -225,7 +225,7 @@ func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRu
|
|||
}
|
||||
|
||||
// String returns the unescaped string value for a JSON string.
|
||||
// For other JSON kinds, this returns the raw JSON represention.
|
||||
// For other JSON kinds, this returns the raw JSON representation.
|
||||
func (t Token) String() string {
|
||||
// This is inlinable to take advantage of "function outlining".
|
||||
// This avoids an allocation for the string(b) conversion
|
||||
|
@ -373,10 +373,10 @@ func (t Token) Int() int64 {
|
|||
case 'i':
|
||||
return int64(t.num)
|
||||
case 'u':
|
||||
if uint64(t.num) > maxInt64 {
|
||||
if t.num > maxInt64 {
|
||||
return maxInt64
|
||||
}
|
||||
return int64(uint64(t.num))
|
||||
return int64(t.num)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -425,7 +425,7 @@ func (t Token) Uint() uint64 {
|
|||
// Handle exact integer value.
|
||||
switch t.str[0] {
|
||||
case 'u':
|
||||
return uint64(t.num)
|
||||
return t.num
|
||||
case 'i':
|
||||
if int64(t.num) < minUint64 {
|
||||
return minUint64
|
||||
|
56 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go (generated, vendored)
|
@ -263,7 +263,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) {
|
|||
afterValue := d.InputOffset()
|
||||
|
||||
if isSorted && len(*members) > 0 {
|
||||
isSorted = lessUTF16(prevName, name)
|
||||
isSorted = lessUTF16(prevName, []byte(name))
|
||||
}
|
||||
*members = append(*members, memberName{name, beforeName, afterValue})
|
||||
prevName = name
|
||||
|
@ -317,7 +317,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) {
|
|||
// to the UTF-16 codepoints of the UTF-8 encoded input strings.
|
||||
// This implements the ordering specified in RFC 8785, section 3.2.3.
|
||||
// The inputs must be valid UTF-8, otherwise this may panic.
|
||||
func lessUTF16(x, y []byte) bool {
|
||||
func lessUTF16[Bytes []byte | string](x, y Bytes) bool {
|
||||
// NOTE: This is an optimized, allocation-free implementation
|
||||
// of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the
|
||||
// two implementations agree on the result of comparing any two strings.
|
||||
|
@ -326,8 +326,13 @@ func lessUTF16(x, y []byte) bool {
|
|||
return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF')
|
||||
}
|
||||
|
||||
var invalidUTF8 bool
|
||||
x0, y0 := x, y
|
||||
for {
|
||||
if len(x) == 0 || len(y) == 0 {
|
||||
if len(x) == len(y) && invalidUTF8 {
|
||||
return string(x0) < string(y0)
|
||||
}
|
||||
return len(x) < len(y)
|
||||
}
|
||||
|
||||
|
@ -341,35 +346,36 @@ func lessUTF16(x, y []byte) bool {
|
|||
}
|
||||
|
||||
// Decode next pair of runes as UTF-8.
|
||||
rx, nx := utf8.DecodeRune(x)
|
||||
ry, ny := utf8.DecodeRune(y)
|
||||
// TODO(https://go.dev/issue/56948): Use a generic implementation
|
||||
// of utf8.DecodeRune, or rely on a compiler optimization to statically
|
||||
// hide the cost of a type switch (https://go.dev/issue/57072).
|
||||
var rx, ry rune
|
||||
var nx, ny int
|
||||
switch any(x).(type) {
|
||||
case string:
|
||||
rx, nx = utf8.DecodeRuneInString(string(x))
|
||||
ry, ny = utf8.DecodeRuneInString(string(y))
|
||||
case []byte:
|
||||
rx, nx = utf8.DecodeRune([]byte(x))
|
||||
ry, ny = utf8.DecodeRune([]byte(y))
|
||||
}
|
||||
|
||||
selfx := isUTF16Self(rx)
|
||||
selfy := isUTF16Self(ry)
|
||||
switch {
|
||||
|
||||
// Both runes encode as either a single or surrogate pair
|
||||
// of UTF-16 codepoints.
|
||||
case isUTF16Self(rx) == isUTF16Self(ry):
|
||||
if rx != ry {
|
||||
return rx < ry
|
||||
}
|
||||
|
||||
// The x rune is a single UTF-16 codepoint, while
|
||||
// the y rune is a surrogate pair of UTF-16 codepoints.
|
||||
case isUTF16Self(rx):
|
||||
ry, _ := utf16.EncodeRune(ry)
|
||||
if rx != ry {
|
||||
return rx < ry
|
||||
}
|
||||
panic("BUG: invalid UTF-8") // implies rx is an unpaired surrogate half
|
||||
|
||||
case selfx && !selfy:
|
||||
ry, _ = utf16.EncodeRune(ry)
|
||||
// The y rune is a single UTF-16 codepoint, while
|
||||
// the x rune is a surrogate pair of UTF-16 codepoints.
|
||||
case isUTF16Self(ry):
|
||||
rx, _ := utf16.EncodeRune(rx)
|
||||
if rx != ry {
|
||||
return rx < ry
|
||||
}
|
||||
panic("BUG: invalid UTF-8") // implies ry is an unpaired surrogate half
|
||||
case selfy && !selfx:
|
||||
rx, _ = utf16.EncodeRune(rx)
|
||||
}
|
||||
if rx != ry {
|
||||
return rx < ry
|
||||
}
|
||||
invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1)
|
||||
x, y = x[nx:], y[ny:]
|
||||
}
|
||||
}
|
||||
|
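The hunk above makes lessUTF16 generic over []byte and string so member names can be compared without conversions or allocations. Its comments reference an unoptimized lessUTF16Simple in fuzz_test.go; the sketch below is an illustrative stand-in for such a reference comparator (not the vendored test code) and also shows how UTF-16 order can disagree with code-point order:

package main

import (
	"fmt"
	"unicode/utf16"
)

// lessUTF16Simple compares two strings by their UTF-16 code units,
// the ordering required by RFC 8785, section 3.2.3.
func lessUTF16Simple(x, y string) bool {
	ux := utf16.Encode([]rune(x))
	uy := utf16.Encode([]rune(y))
	for i := 0; i < len(ux) && i < len(uy); i++ {
		if ux[i] != uy[i] {
			return ux[i] < uy[i]
		}
	}
	return len(ux) < len(uy)
}

func main() {
	// U+1F600 (code point 0x1F600) is larger than U+FF21 by code point, but it
	// encodes as the surrogate pair D83D DE00, and 0xD83D < 0xFF21, so it sorts
	// first under UTF-16 order.
	fmt.Println(lessUTF16Simple("\U0001F600", "\uFF21")) // true
}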
|
|
@ -0,0 +1,260 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schemaconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/schema"
|
||||
)
|
||||
|
||||
// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema.
|
||||
// - models: a map from definition name to OpenAPI V3 structural schema for each definition.
|
||||
// Key in map is used to resolve references in the schema.
|
||||
// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved.
|
||||
// - returns: nil and an error if there is a parse error, or if schema does not satisfy a
|
||||
// required structural schema invariant for conversion. If no error, returns
|
||||
// a new smd schema.
|
||||
//
|
||||
// Schema should be validated as structural before using with this function, or
|
||||
// there may be information lost.
|
||||
func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) {
|
||||
c := convert{
|
||||
preserveUnknownFields: preserveUnknownFields,
|
||||
output: &schema.Schema{},
|
||||
}
|
||||
|
||||
for name, spec := range models {
|
||||
// Skip/Ignore top-level references
|
||||
if len(spec.Ref.String()) > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var a schema.Atom
|
||||
|
||||
// Hard-coded schemas for now as proto_models implementation functions.
|
||||
// https://github.com/kubernetes/kube-openapi/issues/364
|
||||
if name == quantityResource {
|
||||
a = schema.Atom{
|
||||
Scalar: untypedDef.Atom.Scalar,
|
||||
}
|
||||
} else if name == rawExtensionResource {
|
||||
a = untypedDef.Atom
|
||||
} else {
|
||||
c2 := c.push(name, &a)
|
||||
c2.visitSpec(spec)
|
||||
c.pop(c2)
|
||||
}
|
||||
|
||||
c.insertTypeDef(name, a)
|
||||
}
|
||||
|
||||
if len(c.errorMessages) > 0 {
|
||||
return nil, errors.New(strings.Join(c.errorMessages, "\n"))
|
||||
}
|
||||
|
||||
c.addCommonTypes()
|
||||
return c.output, nil
|
||||
}
|
||||
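A hedged sketch of calling the new ToSchemaFromOpenAPI entry point from outside the package; the definition name and schema below are invented, and the input is assumed to already be a validated structural schema, as the doc comment requires:

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/schemaconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	models := map[string]*spec.Schema{
		"io.example.v1.Widget": {
			SchemaProps: spec.SchemaProps{
				Type: spec.StringOrArray{"object"},
				Properties: map[string]spec.Schema{
					"name": {SchemaProps: spec.SchemaProps{Type: spec.StringOrArray{"string"}}},
				},
			},
		},
	}
	s, err := schemaconv.ToSchemaFromOpenAPI(models, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(s.Types)) // the Widget type plus the common types added by addCommonTypes
}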
|
||||
func (c *convert) visitSpec(m *spec.Schema) {
|
||||
// Check if this schema opts its descendants into preserve-unknown-fields
|
||||
if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true {
|
||||
c.preserveUnknownFields = true
|
||||
}
|
||||
a := c.top()
|
||||
*a = c.parseSchema(m)
|
||||
}
|
||||
|
||||
func (c *convert) parseSchema(m *spec.Schema) schema.Atom {
|
||||
// k8s-generated OpenAPI specs have historically used only one value for
|
||||
// type and starting with OpenAPIV3 it is only allowed to be
|
||||
// a single string.
|
||||
typ := ""
|
||||
if len(m.Type) > 0 {
|
||||
typ = m.Type[0]
|
||||
}
|
||||
|
||||
// Structural Schemas produced by kubernetes follow very specific rules which
|
||||
// we can use to infer the SMD type:
|
||||
switch typ {
|
||||
case "":
|
||||
// According to Swagger docs:
|
||||
// https://swagger.io/docs/specification/data-models/data-types/#any
|
||||
//
|
||||
// If no type is specified, it is equivalent to accepting any type.
|
||||
return schema.Atom{
|
||||
Scalar: ptr(schema.Scalar("untyped")),
|
||||
List: c.parseList(m),
|
||||
Map: c.parseObject(m),
|
||||
}
|
||||
|
||||
case "object":
|
||||
return schema.Atom{
|
||||
Map: c.parseObject(m),
|
||||
}
|
||||
case "array":
|
||||
return schema.Atom{
|
||||
List: c.parseList(m),
|
||||
}
|
||||
case "integer", "boolean", "number", "string":
|
||||
return convertPrimitive(typ, m.Format)
|
||||
default:
|
||||
c.reportError("unrecognized type: '%v'", typ)
|
||||
return schema.Atom{
|
||||
Scalar: ptr(schema.Scalar("untyped")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef {
|
||||
refString := specSchema.Ref.String()
|
||||
|
||||
// Special-case handling for $ref stored inside a single-element allOf
|
||||
if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 {
|
||||
refString = specSchema.AllOf[0].Ref.String()
|
||||
}
|
||||
|
||||
if _, n := path.Split(refString); len(n) > 0 {
|
||||
//!TODO: Refactor the field ElementRelationship override
|
||||
// we can generate the types with overrides ahead of time rather than
|
||||
// requiring the hacky runtime support
|
||||
// (could just create a normalized key struct containing all customizations
|
||||
// to deduplicate)
|
||||
mapRelationship, err := getMapElementRelationship(specSchema.Extensions)
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
|
||||
if len(mapRelationship) > 0 {
|
||||
return schema.TypeRef{
|
||||
NamedType: &n,
|
||||
ElementRelationship: &mapRelationship,
|
||||
}
|
||||
}
|
||||
|
||||
return schema.TypeRef{
|
||||
NamedType: &n,
|
||||
}
|
||||
|
||||
}
|
||||
var inlined schema.Atom
|
||||
|
||||
// compute the type inline
|
||||
c2 := c.push("inlined in "+c.currentName, &inlined)
|
||||
c2.preserveUnknownFields = c.preserveUnknownFields
|
||||
c2.visitSpec(specSchema)
|
||||
c.pop(c2)
|
||||
|
||||
return schema.TypeRef{
|
||||
Inlined: inlined,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) parseObject(s *spec.Schema) *schema.Map {
|
||||
var fields []schema.StructField
|
||||
for name, member := range s.Properties {
|
||||
fields = append(fields, schema.StructField{
|
||||
Name: name,
|
||||
Type: c.makeOpenAPIRef(&member),
|
||||
Default: member.Default,
|
||||
})
|
||||
}
|
||||
|
||||
// AdditionalProperties informs the schema of any "unknown" keys
|
||||
// Unknown keys are enforced by the ElementType field.
|
||||
elementType := func() schema.TypeRef {
|
||||
if s.AdditionalProperties == nil {
|
||||
// According to openAPI spec, an object without properties and without
|
||||
// additionalProperties is assumed to be a free-form object.
|
||||
if c.preserveUnknownFields || len(s.Properties) == 0 {
|
||||
return schema.TypeRef{
|
||||
NamedType: &deducedName,
|
||||
}
|
||||
}
|
||||
|
||||
// If properties are specified, do not implicitly allow unknown
|
||||
// fields
|
||||
return schema.TypeRef{}
|
||||
} else if s.AdditionalProperties.Schema != nil {
|
||||
// Unknown fields use the referred schema
|
||||
return c.makeOpenAPIRef(s.AdditionalProperties.Schema)
|
||||
|
||||
} else if s.AdditionalProperties.Allows {
|
||||
// A boolean instead of a schema was provided. Deduce the
|
||||
// type from the value provided at runtime.
|
||||
return schema.TypeRef{
|
||||
NamedType: &deducedName,
|
||||
}
|
||||
} else {
|
||||
// Additional Properties are explicitly disallowed by the user.
|
||||
// Ensure element type is empty.
|
||||
return schema.TypeRef{}
|
||||
}
|
||||
}()
|
||||
|
||||
relationship, err := getMapElementRelationship(s.Extensions)
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
|
||||
return &schema.Map{
|
||||
Fields: fields,
|
||||
ElementRelationship: relationship,
|
||||
ElementType: elementType,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) parseList(s *spec.Schema) *schema.List {
|
||||
relationship, mapKeys, err := getListElementRelationship(s.Extensions)
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
elementType := func() schema.TypeRef {
|
||||
if s.Items != nil {
|
||||
if s.Items.Schema == nil || s.Items.Len() != 1 {
|
||||
c.reportError("structural schema arrays must have exactly one member subtype")
|
||||
return schema.TypeRef{
|
||||
NamedType: &deducedName,
|
||||
}
|
||||
}
|
||||
|
||||
subSchema := s.Items.Schema
|
||||
if subSchema == nil {
|
||||
subSchema = &s.Items.Schemas[0]
|
||||
}
|
||||
return c.makeOpenAPIRef(subSchema)
|
||||
} else if len(s.Type) > 0 && len(s.Type[0]) > 0 {
|
||||
c.reportError("`items` must be specified on arrays")
|
||||
}
|
||||
|
||||
// A list with no items specified is treated as "untyped".
|
||||
return schema.TypeRef{
|
||||
NamedType: &untypedName,
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
return &schema.List{
|
||||
ElementRelationship: relationship,
|
||||
Keys: mapKeys,
|
||||
ElementType: elementType,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schemaconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/util/proto"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/schema"
|
||||
)
|
||||
|
||||
// ToSchema converts openapi definitions into a schema suitable for structured
|
||||
// merge (i.e. kubectl apply v2).
|
||||
func ToSchema(models proto.Models) (*schema.Schema, error) {
|
||||
return ToSchemaWithPreserveUnknownFields(models, false)
|
||||
}
|
||||
|
||||
// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured
|
||||
// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified.
|
||||
func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) {
|
||||
c := convert{
|
||||
preserveUnknownFields: preserveUnknownFields,
|
||||
output: &schema.Schema{},
|
||||
}
|
||||
for _, name := range models.ListModels() {
|
||||
model := models.LookupModel(name)
|
||||
|
||||
var a schema.Atom
|
||||
c2 := c.push(name, &a)
|
||||
model.Accept(c2)
|
||||
c.pop(c2)
|
||||
|
||||
c.insertTypeDef(name, a)
|
||||
}
|
||||
|
||||
if len(c.errorMessages) > 0 {
|
||||
return nil, errors.New(strings.Join(c.errorMessages, "\n"))
|
||||
}
|
||||
|
||||
c.addCommonTypes()
|
||||
return c.output, nil
|
||||
}
|
||||
|
||||
func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef {
|
||||
var tr schema.TypeRef
|
||||
if r, ok := model.(*proto.Ref); ok {
|
||||
if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" {
|
||||
return schema.TypeRef{
|
||||
NamedType: &untypedName,
|
||||
}
|
||||
}
|
||||
// reference a named type
|
||||
_, n := path.Split(r.Reference())
|
||||
tr.NamedType = &n
|
||||
|
||||
mapRelationship, err := getMapElementRelationship(model.GetExtensions())
|
||||
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
|
||||
// empty string means unset.
|
||||
if len(mapRelationship) > 0 {
|
||||
tr.ElementRelationship = &mapRelationship
|
||||
}
|
||||
} else {
|
||||
// compute the type inline
|
||||
c2 := c.push("inlined in "+c.currentName, &tr.Inlined)
|
||||
c2.preserveUnknownFields = preserveUnknownFields
|
||||
model.Accept(c2)
|
||||
c.pop(c2)
|
||||
|
||||
if tr == (schema.TypeRef{}) {
|
||||
// emit warning?
|
||||
tr.NamedType = &untypedName
|
||||
}
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
func (c *convert) VisitKind(k *proto.Kind) {
|
||||
preserveUnknownFields := c.preserveUnknownFields
|
||||
if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true {
|
||||
preserveUnknownFields = true
|
||||
}
|
||||
|
||||
a := c.top()
|
||||
a.Map = &schema.Map{}
|
||||
for _, name := range k.FieldOrder {
|
||||
member := k.Fields[name]
|
||||
tr := c.makeRef(member, preserveUnknownFields)
|
||||
a.Map.Fields = append(a.Map.Fields, schema.StructField{
|
||||
Name: name,
|
||||
Type: tr,
|
||||
Default: member.GetDefault(),
|
||||
})
|
||||
}
|
||||
|
||||
unions, err := makeUnions(k.GetExtensions())
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
return
|
||||
}
|
||||
// TODO: We should check that the fields and discriminator
|
||||
// specified in the union are actual fields in the struct.
|
||||
a.Map.Unions = unions
|
||||
|
||||
if preserveUnknownFields {
|
||||
a.Map.ElementType = schema.TypeRef{
|
||||
NamedType: &deducedName,
|
||||
}
|
||||
}
|
||||
|
||||
a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions())
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) VisitArray(a *proto.Array) {
|
||||
relationship, mapKeys, err := getListElementRelationship(a.GetExtensions())
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
|
||||
atom := c.top()
|
||||
atom.List = &schema.List{
|
||||
ElementType: c.makeRef(a.SubType, c.preserveUnknownFields),
|
||||
ElementRelationship: relationship,
|
||||
Keys: mapKeys,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) VisitMap(m *proto.Map) {
|
||||
relationship, err := getMapElementRelationship(m.GetExtensions())
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
}
|
||||
|
||||
a := c.top()
|
||||
a.Map = &schema.Map{
|
||||
ElementType: c.makeRef(m.SubType, c.preserveUnknownFields),
|
||||
ElementRelationship: relationship,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) VisitPrimitive(p *proto.Primitive) {
|
||||
a := c.top()
|
||||
if c.currentName == quantityResource {
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
} else {
|
||||
*a = convertPrimitive(p.Type, p.Format)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) VisitArbitrary(a *proto.Arbitrary) {
|
||||
*c.top() = deducedDef.Atom
|
||||
}
|
||||
|
||||
func (c *convert) VisitReference(proto.Reference) {
|
||||
// Do nothing, we handle references specially
|
||||
}
|
|
@ -17,43 +17,18 @@ limitations under the License.
|
|||
package schemaconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/util/proto"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/schema"
|
||||
)
|
||||
|
||||
const (
|
||||
quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity"
|
||||
quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity"
|
||||
rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension"
|
||||
)
|
||||
|
||||
// ToSchema converts openapi definitions into a schema suitable for structured
|
||||
// merge (i.e. kubectl apply v2).
|
||||
func ToSchema(models proto.Models) (*schema.Schema, error) {
|
||||
return ToSchemaWithPreserveUnknownFields(models, false)
|
||||
}
|
||||
|
||||
// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured
|
||||
// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified.
|
||||
func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) {
|
||||
c := convert{
|
||||
input: models,
|
||||
preserveUnknownFields: preserveUnknownFields,
|
||||
output: &schema.Schema{},
|
||||
}
|
||||
if err := c.convertAll(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.addCommonTypes()
|
||||
return c.output, nil
|
||||
}
|
||||
|
||||
type convert struct {
|
||||
input proto.Models
|
||||
preserveUnknownFields bool
|
||||
output *schema.Schema
|
||||
|
||||
|
@ -64,7 +39,6 @@ type convert struct {
|
|||
|
||||
func (c *convert) push(name string, a *schema.Atom) *convert {
|
||||
return &convert{
|
||||
input: c.input,
|
||||
preserveUnknownFields: c.preserveUnknownFields,
|
||||
output: c.output,
|
||||
currentName: name,
|
||||
|
@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) {
|
|||
c.errorMessages = append(c.errorMessages, c2.errorMessages...)
|
||||
}
|
||||
|
||||
func (c *convert) convertAll() error {
|
||||
for _, name := range c.input.ListModels() {
|
||||
model := c.input.LookupModel(name)
|
||||
c.insertTypeDef(name, model)
|
||||
}
|
||||
if len(c.errorMessages) > 0 {
|
||||
return errors.New(strings.Join(c.errorMessages, "\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *convert) reportError(format string, args ...interface{}) {
|
||||
c.errorMessages = append(c.errorMessages,
|
||||
c.currentName+": "+fmt.Sprintf(format, args...),
|
||||
)
|
||||
}
|
||||
|
||||
func (c *convert) insertTypeDef(name string, model proto.Schema) {
|
||||
func (c *convert) insertTypeDef(name string, atom schema.Atom) {
|
||||
def := schema.TypeDef{
|
||||
Name: name,
|
||||
Atom: atom,
|
||||
}
|
||||
c2 := c.push(name, &def.Atom)
|
||||
model.Accept(c2)
|
||||
c.pop(c2)
|
||||
if def.Atom == (schema.Atom{}) {
|
||||
// This could happen if there were a top-level reference.
|
||||
return
|
||||
|
@ -156,46 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{
|
|||
},
|
||||
}
|
||||
|
||||
func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef {
|
||||
var tr schema.TypeRef
|
||||
if r, ok := model.(*proto.Ref); ok {
|
||||
if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" {
|
||||
return schema.TypeRef{
|
||||
NamedType: &untypedName,
|
||||
}
|
||||
}
|
||||
// reference a named type
|
||||
_, n := path.Split(r.Reference())
|
||||
tr.NamedType = &n
|
||||
|
||||
ext := model.GetExtensions()
|
||||
if val, ok := ext["x-kubernetes-map-type"]; ok {
|
||||
switch val {
|
||||
case "atomic":
|
||||
relationship := schema.Atomic
|
||||
tr.ElementRelationship = &relationship
|
||||
case "granular":
|
||||
relationship := schema.Separable
|
||||
tr.ElementRelationship = &relationship
|
||||
default:
|
||||
c.reportError("unknown map type %v", val)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// compute the type inline
|
||||
c2 := c.push("inlined in "+c.currentName, &tr.Inlined)
|
||||
c2.preserveUnknownFields = preserveUnknownFields
|
||||
model.Accept(c2)
|
||||
c.pop(c2)
|
||||
|
||||
if tr == (schema.TypeRef{}) {
|
||||
// emit warning?
|
||||
tr.NamedType = &untypedName
|
||||
}
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) {
|
||||
schemaUnions := []schema.Union{}
|
||||
if iunions, ok := extensions["x-kubernetes-unions"]; ok {
|
||||
|
@ -299,52 +220,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) {
|
|||
return union, nil
|
||||
}
|
||||
|
||||
func (c *convert) VisitKind(k *proto.Kind) {
|
||||
preserveUnknownFields := c.preserveUnknownFields
|
||||
if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true {
|
||||
preserveUnknownFields = true
|
||||
}
|
||||
|
||||
a := c.top()
|
||||
a.Map = &schema.Map{}
|
||||
for _, name := range k.FieldOrder {
|
||||
member := k.Fields[name]
|
||||
tr := c.makeRef(member, preserveUnknownFields)
|
||||
a.Map.Fields = append(a.Map.Fields, schema.StructField{
|
||||
Name: name,
|
||||
Type: tr,
|
||||
Default: member.GetDefault(),
|
||||
})
|
||||
}
|
||||
|
||||
unions, err := makeUnions(k.GetExtensions())
|
||||
if err != nil {
|
||||
c.reportError(err.Error())
|
||||
return
|
||||
}
|
||||
// TODO: We should check that the fields and discriminator
|
||||
// specified in the union are actual fields in the struct.
|
||||
a.Map.Unions = unions
|
||||
|
||||
if preserveUnknownFields {
|
||||
a.Map.ElementType = schema.TypeRef{
|
||||
NamedType: &deducedName,
|
||||
}
|
||||
}
|
||||
|
||||
ext := k.GetExtensions()
|
||||
if val, ok := ext["x-kubernetes-map-type"]; ok {
|
||||
switch val {
|
||||
case "atomic":
|
||||
a.Map.ElementRelationship = schema.Atomic
|
||||
case "granular":
|
||||
a.Map.ElementRelationship = schema.Separable
|
||||
default:
|
||||
c.reportError("unknown map type %v", val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func toStringSlice(o interface{}) (out []string, ok bool) {
|
||||
switch t := o.(type) {
|
||||
case []interface{}:
|
||||
|
@ -355,117 +230,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) {
|
|||
}
|
||||
}
|
||||
return out, true
|
||||
case []string:
|
||||
return t, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *convert) VisitArray(a *proto.Array) {
|
||||
atom := c.top()
|
||||
atom.List = &schema.List{
|
||||
ElementRelationship: schema.Atomic,
|
||||
}
|
||||
l := atom.List
|
||||
l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields)
|
||||
|
||||
ext := a.GetExtensions()
|
||||
|
||||
if val, ok := ext["x-kubernetes-list-type"]; ok {
|
||||
if val == "atomic" {
|
||||
l.ElementRelationship = schema.Atomic
|
||||
} else if val == "set" {
|
||||
l.ElementRelationship = schema.Associative
|
||||
} else if val == "map" {
|
||||
l.ElementRelationship = schema.Associative
|
||||
if keys, ok := ext["x-kubernetes-list-map-keys"]; ok {
|
||||
if keyNames, ok := toStringSlice(keys); ok {
|
||||
l.Keys = keyNames
|
||||
} else {
|
||||
c.reportError("uninterpreted map keys: %#v", keys)
|
||||
}
|
||||
} else {
|
||||
c.reportError("missing map keys")
|
||||
}
|
||||
} else {
|
||||
c.reportError("unknown list type %v", val)
|
||||
l.ElementRelationship = schema.Atomic
|
||||
}
|
||||
} else if val, ok := ext["x-kubernetes-patch-strategy"]; ok {
|
||||
if val == "merge" || val == "merge,retainKeys" {
|
||||
l.ElementRelationship = schema.Associative
|
||||
if key, ok := ext["x-kubernetes-patch-merge-key"]; ok {
|
||||
if keyName, ok := key.(string); ok {
|
||||
l.Keys = []string{keyName}
|
||||
} else {
|
||||
c.reportError("uninterpreted merge key: %#v", key)
|
||||
}
|
||||
} else {
|
||||
// It's not an error for this to be absent, it
|
||||
// means it's a set.
|
||||
}
|
||||
} else if val == "retainKeys" {
|
||||
} else {
|
||||
c.reportError("unknown patch strategy %v", val)
|
||||
l.ElementRelationship = schema.Atomic
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *convert) VisitMap(m *proto.Map) {
|
||||
a := c.top()
|
||||
a.Map = &schema.Map{}
|
||||
a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields)
|
||||
|
||||
ext := m.GetExtensions()
|
||||
if val, ok := ext["x-kubernetes-map-type"]; ok {
|
||||
switch val {
|
||||
case "atomic":
|
||||
a.Map.ElementRelationship = schema.Atomic
|
||||
case "granular":
|
||||
a.Map.ElementRelationship = schema.Separable
|
||||
default:
|
||||
c.reportError("unknown map type %v", val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ptr(s schema.Scalar) *schema.Scalar { return &s }
|
||||
|
||||
func (c *convert) VisitPrimitive(p *proto.Primitive) {
|
||||
a := c.top()
|
||||
if c.currentName == quantityResource {
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
} else {
|
||||
switch p.Type {
|
||||
case proto.Integer:
|
||||
a.Scalar = ptr(schema.Numeric)
|
||||
case proto.Number:
|
||||
a.Scalar = ptr(schema.Numeric)
|
||||
case proto.String:
|
||||
switch p.Format {
|
||||
case "":
|
||||
a.Scalar = ptr(schema.String)
|
||||
case "byte":
|
||||
// byte really means []byte and is encoded as a string.
|
||||
a.Scalar = ptr(schema.String)
|
||||
case "int-or-string":
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
case "date-time":
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
default:
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
}
|
||||
case proto.Boolean:
|
||||
a.Scalar = ptr(schema.Boolean)
|
||||
// Basic conversion functions to convert OpenAPI schema definitions to
|
||||
// SMD Schema atoms
|
||||
func convertPrimitive(typ string, format string) (a schema.Atom) {
|
||||
switch typ {
|
||||
case "integer":
|
||||
a.Scalar = ptr(schema.Numeric)
|
||||
case "number":
|
||||
a.Scalar = ptr(schema.Numeric)
|
||||
case "string":
|
||||
switch format {
|
||||
case "":
|
||||
a.Scalar = ptr(schema.String)
|
||||
case "byte":
|
||||
// byte really means []byte and is encoded as a string.
|
||||
a.Scalar = ptr(schema.String)
|
||||
case "int-or-string":
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
case "date-time":
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
default:
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
}
|
||||
case "boolean":
|
||||
a.Scalar = ptr(schema.Boolean)
|
||||
default:
|
||||
a.Scalar = ptr(schema.Scalar("untyped"))
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
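For example, convertPrimitive("integer", "") and convertPrimitive("number", "") both yield a Numeric scalar, convertPrimitive("string", "byte") stays a String scalar (base64 bytes travel as strings), and convertPrimitive("string", "int-or-string") falls through to the "untyped" scalar, exactly as the switch above reads.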
|
||||
func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) {
|
||||
if val, ok := ext["x-kubernetes-list-type"]; ok {
|
||||
switch val {
|
||||
case "atomic":
|
||||
return schema.Atomic, nil, nil
|
||||
case "set":
|
||||
return schema.Associative, nil, nil
|
||||
case "map":
|
||||
keys, ok := ext["x-kubernetes-list-map-keys"]
|
||||
|
||||
if !ok {
|
||||
return schema.Associative, nil, fmt.Errorf("missing map keys")
|
||||
}
|
||||
|
||||
keyNames, ok := toStringSlice(keys)
|
||||
if !ok {
|
||||
return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys)
|
||||
}
|
||||
|
||||
return schema.Associative, keyNames, nil
|
||||
default:
|
||||
return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val)
|
||||
}
|
||||
} else if val, ok := ext["x-kubernetes-patch-strategy"]; ok {
|
||||
switch val {
|
||||
case "merge", "merge,retainKeys":
|
||||
if key, ok := ext["x-kubernetes-patch-merge-key"]; ok {
|
||||
keyName, ok := key.(string)
|
||||
|
||||
if !ok {
|
||||
return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key)
|
||||
}
|
||||
|
||||
return schema.Associative, []string{keyName}, nil
|
||||
}
|
||||
// It's not an error for x-kubernetes-patch-merge-key to be absent,
|
||||
// it means it's a set
|
||||
return schema.Associative, nil, nil
|
||||
case "retainKeys":
|
||||
return schema.Atomic, nil, nil
|
||||
default:
|
||||
return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
// Treat as atomic by default
|
||||
return schema.Atomic, nil, nil
|
||||
}
|
||||
|
||||
// Returns map element relationship if specified, or empty string if unspecified
|
||||
func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) {
|
||||
val, ok := ext["x-kubernetes-map-type"]
|
||||
if !ok {
|
||||
// unset Map element relationship
|
||||
return "", nil
|
||||
}
|
||||
|
||||
switch val {
|
||||
case "atomic":
|
||||
return schema.Atomic, nil
|
||||
case "granular":
|
||||
return schema.Separable, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown map type %v", val)
|
||||
}
|
||||
}
|
||||
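A brief in-package sketch of the extension maps these two helpers inspect and the relationships they hand back (the values are invented, and both functions are unexported, so this is illustration rather than public API):

func exampleElementRelationships() {
	listExt := map[string]any{
		"x-kubernetes-list-type":     "map",
		"x-kubernetes-list-map-keys": []any{"name"},
	}
	rel, keys, _ := getListElementRelationship(listExt)
	// rel == schema.Associative, keys == []string{"name"}

	mapRel, _ := getMapElementRelationship(map[string]any{
		"x-kubernetes-map-type": "atomic",
	})
	// mapRel == schema.Atomic; with the key absent the helper returns "" (unset)
	_, _, _ = rel, keys, mapRel
}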
|
||||
func (c *convert) VisitArbitrary(a *proto.Arbitrary) {
|
||||
*c.top() = deducedDef.Atom
|
||||
}
|
||||
|
||||
func (c *convert) VisitReference(proto.Reference) {
|
||||
// Do nothing, we handle references specially
|
||||
}
|
||||
|
|
|
@ -18,7 +18,10 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
|
@ -41,6 +44,9 @@ func (e *Encoding) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (e *Encoding) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, e)
|
||||
}
|
||||
if err := json.Unmarshal(data, &e.EncodingProps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -50,6 +56,20 @@ func (e *Encoding) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
EncodingProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
e.EncodingProps = x.EncodingProps
|
||||
return nil
|
||||
}
|
||||
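This UnmarshalNextJSON is the template repeated for Example, ExternalDocumentation, Header, MediaType, Operation and Parameter later in this diff: decode into an anonymous struct that embeds spec.Extensions next to the props struct, then split the two back apart (extensions via internal.SanitizeExtensions, props copied wholesale). A hypothetical payload, which also exercises the Explode field this diff fixes from string to bool:

data := []byte(`{"contentType": "application/json", "explode": true, "x-note": "internal"}`)
var e spec3.Encoding
if err := e.UnmarshalJSON(data); err != nil {
	// handle error
}
// e.ContentType == "application/json"
// e.Explode == true (this member could not decode while Explode was typed as string)
// e.Extensions["x-note"] == "internal"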
|
||||
type EncodingProps struct {
|
||||
// Content Type for encoding a specific property
|
||||
ContentType string `json:"contentType,omitempty"`
|
||||
|
@ -58,7 +78,7 @@ type EncodingProps struct {
|
|||
// Describes how a specific property value will be serialized depending on its type
|
||||
Style string `json:"style,omitempty"`
|
||||
// When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. For other types of properties this property has no effect
|
||||
Explode string `json:"explode,omitempty"`
|
||||
Explode bool `json:"explode,omitempty"`
|
||||
// AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986
|
||||
AllowReserved bool `json:"allowReserved,omitempty"`
|
||||
}
|
||||
|
|
|
@ -19,8 +19,11 @@ package spec3
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// Example https://swagger.io/specification/#example-object
|
||||
|
@ -49,6 +52,9 @@ func (e *Example) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (e *Example) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, e)
|
||||
}
|
||||
if err := json.Unmarshal(data, &e.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -61,6 +67,23 @@ func (e *Example) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ExampleProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
e.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
e.ExampleProps = x.ExampleProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExampleProps struct {
|
||||
// Summary holds a short description of the example
|
||||
Summary string `json:"summary,omitempty"`
|
||||
|
|
|
@ -18,8 +18,11 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
type ExternalDocumentation struct {
|
||||
|
@ -48,6 +51,9 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, e)
|
||||
}
|
||||
if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -56,3 +62,16 @@ func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ExternalDocumentationProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
e.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
e.ExternalDocumentationProps = x.ExternalDocumentationProps
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,254 @@
|
|||
package spec3
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// refChance is the chance that a particular component will use a $ref
|
||||
// instead of fuzzed. Expressed as a fraction 1/n, currently there is
|
||||
// a 1/3 chance that a ref will be used.
|
||||
const refChance = 3
|
||||
|
||||
const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
func randAlphanumString() string {
|
||||
arr := make([]string, rand.Intn(10)+5)
|
||||
for i := 0; i < len(arr); i++ {
|
||||
arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))])
|
||||
}
|
||||
return strings.Join(arr, "")
|
||||
}
|
||||
|
||||
var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
|
||||
func(s *string, c fuzz.Continue) {
|
||||
// All OpenAPI V3 map keys must follow the corresponding
|
||||
// regex. Note that this restricts the range for all other
|
||||
// string values as well.
|
||||
str := randAlphanumString()
|
||||
*s = str
|
||||
},
|
||||
func(o *OpenAPI, c fuzz.Continue) {
|
||||
c.FuzzNoCustom(o)
|
||||
o.Version = "3.0.0"
|
||||
},
|
||||
func(r *interface{}, c fuzz.Continue) {
|
||||
switch c.Intn(3) {
|
||||
case 0:
|
||||
*r = nil
|
||||
case 1:
|
||||
n := c.RandString() + "x"
|
||||
*r = n
|
||||
case 2:
|
||||
n := c.Float64()
|
||||
*r = n
|
||||
}
|
||||
},
|
||||
func(v **spec.Info, c fuzz.Continue) {
|
||||
// Info is never nil
|
||||
*v = &spec.Info{}
|
||||
c.FuzzNoCustom(*v)
|
||||
(*v).Title = c.RandString() + "x"
|
||||
},
|
||||
func(v *Paths, c fuzz.Continue) {
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
num := c.Intn(5)
|
||||
if num > 0 {
|
||||
v.Paths = make(map[string]*Path)
|
||||
}
|
||||
for i := 0; i < num; i++ {
|
||||
val := Path{}
|
||||
c.Fuzz(&val)
|
||||
v.Paths["/"+c.RandString()] = &val
|
||||
}
|
||||
},
|
||||
func(v *SecurityScheme, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Refable)
|
||||
return
|
||||
}
|
||||
switch c.Intn(4) {
|
||||
case 0:
|
||||
v.Type = "apiKey"
|
||||
v.Name = c.RandString() + "x"
|
||||
switch c.Intn(3) {
|
||||
case 0:
|
||||
v.In = "query"
|
||||
case 1:
|
||||
v.In = "header"
|
||||
case 2:
|
||||
v.In = "cookie"
|
||||
}
|
||||
case 1:
|
||||
v.Type = "http"
|
||||
case 2:
|
||||
v.Type = "oauth2"
|
||||
v.Flows = make(map[string]*OAuthFlow)
|
||||
flow := OAuthFlow{}
|
||||
flow.AuthorizationUrl = c.RandString() + "x"
|
||||
v.Flows["implicit"] = &flow
|
||||
flow.Scopes = make(map[string]string)
|
||||
flow.Scopes["foo"] = "bar"
|
||||
case 3:
|
||||
v.Type = "openIdConnect"
|
||||
v.OpenIdConnectUrl = "https://" + c.RandString()
|
||||
}
|
||||
v.Scheme = "basic"
|
||||
},
|
||||
func(v *spec.Ref, c fuzz.Continue) {
|
||||
switch c.Intn(7) {
|
||||
case 0:
|
||||
*v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString())
|
||||
case 1:
|
||||
*v = spec.MustCreateRef("#/components/responses/" + randAlphanumString())
|
||||
case 2:
|
||||
*v = spec.MustCreateRef("#/components/headers/" + randAlphanumString())
|
||||
case 3:
|
||||
*v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString())
|
||||
case 5:
|
||||
*v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString())
|
||||
case 6:
|
||||
*v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString())
|
||||
}
|
||||
},
|
||||
func(v *Parameter, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Refable)
|
||||
return
|
||||
}
|
||||
c.Fuzz(&v.ParameterProps)
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
|
||||
switch c.Intn(3) {
|
||||
case 0:
|
||||
// Header param
|
||||
v.In = "query"
|
||||
case 1:
|
||||
v.In = "header"
|
||||
case 2:
|
||||
v.In = "cookie"
|
||||
}
|
||||
},
|
||||
func(v *RequestBody, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Refable)
|
||||
return
|
||||
}
|
||||
c.Fuzz(&v.RequestBodyProps)
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
},
|
||||
func(v *Header, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Refable)
|
||||
return
|
||||
}
|
||||
c.Fuzz(&v.HeaderProps)
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
},
|
||||
func(v *ResponsesProps, c fuzz.Continue) {
|
||||
c.Fuzz(&v.Default)
|
||||
n := c.Intn(5)
|
||||
for i := 0; i < n; i++ {
|
||||
r2 := Response{}
|
||||
c.Fuzz(&r2)
|
||||
// HTTP Status code in 100-599 Range
|
||||
code := c.Intn(500) + 100
|
||||
v.StatusCodeResponses = make(map[int]*Response)
|
||||
v.StatusCodeResponses[code] = &r2
|
||||
}
|
||||
},
|
||||
func(v *Response, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Refable)
|
||||
return
|
||||
}
|
||||
c.Fuzz(&v.ResponseProps)
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
},
|
||||
func(v *spec.Extensions, c fuzz.Continue) {
|
||||
numChildren := c.Intn(5)
|
||||
for i := 0; i < numChildren; i++ {
|
||||
if *v == nil {
|
||||
*v = spec.Extensions{}
|
||||
}
|
||||
(*v)["x-"+c.RandString()] = c.RandString()
|
||||
}
|
||||
},
|
||||
func(v *spec.ExternalDocumentation, c fuzz.Continue) {
|
||||
c.Fuzz(&v.Description)
|
||||
v.URL = "https://" + randAlphanumString()
|
||||
},
|
||||
func(v *spec.SchemaURL, c fuzz.Continue) {
|
||||
*v = spec.SchemaURL("https://" + randAlphanumString())
|
||||
},
|
||||
func(v *spec.SchemaOrBool, c fuzz.Continue) {
|
||||
*v = spec.SchemaOrBool{}
|
||||
|
||||
if c.RandBool() {
|
||||
v.Allows = c.RandBool()
|
||||
} else {
|
||||
v.Schema = &spec.Schema{}
|
||||
v.Allows = true
|
||||
c.Fuzz(&v.Schema)
|
||||
}
|
||||
},
|
||||
func(v *spec.SchemaOrArray, c fuzz.Continue) {
|
||||
*v = spec.SchemaOrArray{}
|
||||
if c.RandBool() {
|
||||
schema := spec.Schema{}
|
||||
c.Fuzz(&schema)
|
||||
v.Schema = &schema
|
||||
} else {
|
||||
v.Schemas = []spec.Schema{}
|
||||
numChildren := c.Intn(5)
|
||||
for i := 0; i < numChildren; i++ {
|
||||
schema := spec.Schema{}
|
||||
c.Fuzz(&schema)
|
||||
v.Schemas = append(v.Schemas, schema)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
},
|
||||
func(v *spec.SchemaOrStringArray, c fuzz.Continue) {
|
||||
if c.RandBool() {
|
||||
*v = spec.SchemaOrStringArray{}
|
||||
if c.RandBool() {
|
||||
c.Fuzz(&v.Property)
|
||||
} else {
|
||||
c.Fuzz(&v.Schema)
|
||||
}
|
||||
}
|
||||
},
|
||||
func(v *spec.Schema, c fuzz.Continue) {
|
||||
if c.Intn(refChance) == 0 {
|
||||
c.Fuzz(&v.Ref)
|
||||
return
|
||||
}
|
||||
if c.RandBool() {
|
||||
// file schema
|
||||
c.Fuzz(&v.Default)
|
||||
c.Fuzz(&v.Description)
|
||||
c.Fuzz(&v.Example)
|
||||
c.Fuzz(&v.ExternalDocs)
|
||||
|
||||
c.Fuzz(&v.Format)
|
||||
c.Fuzz(&v.ReadOnly)
|
||||
c.Fuzz(&v.Required)
|
||||
c.Fuzz(&v.Title)
|
||||
v.Type = spec.StringOrArray{"file"}
|
||||
|
||||
} else {
|
||||
// normal schema
|
||||
c.Fuzz(&v.SchemaProps)
|
||||
c.Fuzz(&v.SwaggerSchemaProps)
|
||||
c.Fuzz(&v.VendorExtensible)
|
||||
c.Fuzz(&v.ExtraProps)
|
||||
}
|
||||
|
||||
},
|
||||
}
|
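A sketch of the round-trip test these fuzz funcs are presumably meant to drive; the test name, seed and the strict DeepEqual comparison are illustrative, and a real test may need a more forgiving comparison for nil-versus-empty collections:

package spec3

import (
	"encoding/json"
	"reflect"
	"testing"

	fuzz "github.com/google/gofuzz"
)

func TestOpenAPIRoundTripSketch(t *testing.T) {
	f := fuzz.NewWithSeed(1).Funcs(OpenAPIV3FuzzFuncs...)

	expected := &OpenAPI{}
	f.Fuzz(expected)

	data, err := json.Marshal(expected)
	if err != nil {
		t.Fatal(err)
	}

	actual := &OpenAPI{}
	if err := json.Unmarshal(data, actual); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, actual) {
		t.Error("fuzzed OpenAPI document did not survive a marshal/unmarshal round trip")
	}
}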
|
@ -20,6 +20,8 @@ import (
|
|||
"encoding/json"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
|
@ -50,6 +52,9 @@ func (h *Header) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (h *Header) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, h)
|
||||
}
|
||||
if err := json.Unmarshal(data, &h.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -63,6 +68,22 @@ func (h *Header) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
HeaderProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
h.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
h.HeaderProps = x.HeaderProps
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeaderProps a struct that describes a header object
|
||||
type HeaderProps struct {
|
||||
// Description holds a brief description of the parameter
|
||||
|
|
|
@ -18,7 +18,10 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
|
@ -44,6 +47,9 @@ func (m *MediaType) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (m *MediaType) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, m)
|
||||
}
|
||||
if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -53,10 +59,24 @@ func (m *MediaType) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
MediaTypeProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
m.MediaTypeProps = x.MediaTypeProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject
|
||||
type MediaTypeProps struct {
|
||||
// Schema holds the schema defining the type used for the media type
|
||||
Schema *spec.Schema `json:"schema,omitempty"`
|
||||
Schema *spec.Schema `json:"schema,omitempty"`
|
||||
// Example of the media type
|
||||
Example interface{} `json:"example,omitempty"`
|
||||
// Examples of the media type. Each example object should match the media type and specific schema if present
|
||||
|
|
|
@ -19,8 +19,10 @@ package spec3
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject
|
||||
|
@ -46,12 +48,28 @@ func (o *Operation) MarshalJSON() ([]byte, error) {
|
|||
|
||||
// UnmarshalJSON hydrates this items instance with the data from JSON
|
||||
func (o *Operation) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, o)
|
||||
}
|
||||
if err := json.Unmarshal(data, &o.OperationProps); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, &o.VendorExtensible)
|
||||
}
|
||||
|
||||
func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
OperationProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
o.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
o.OperationProps = x.OperationProps
|
||||
return nil
|
||||
}
|
||||
|
||||
// OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject
|
||||
type OperationProps struct {
|
||||
// Tags holds a list of tags for API documentation control
|
||||
|
@ -73,7 +91,7 @@ type OperationProps struct {
|
|||
// Deprecated declares this operation to be deprecated
|
||||
Deprecated bool `json:"deprecated,omitempty"`
|
||||
// SecurityRequirement holds a declaration of which security mechanisms can be used for this operation
|
||||
SecurityRequirement []*SecurityRequirement `json:"security,omitempty"`
|
||||
SecurityRequirement []map[string][]string `json:"security,omitempty"`
|
||||
// Servers contains an alternative server array to service this operation
|
||||
Servers []*Server `json:"servers,omitempty"`
|
||||
}
|
||||
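SecurityRequirement changes here from []*SecurityRequirement to a plain []map[string][]string, which mirrors the OpenAPI wire format: each map in the slice is one alternative requirement, its keys name security schemes, and the values list the scopes required from that scheme. An invented value to show the shape (scheme names and scopes are placeholders):

op := spec3.Operation{}
op.SecurityRequirement = []map[string][]string{
	{"api_key": {}},                               // alternative 1: api_key, no scopes
	{"oauth2": {"read:widgets", "write:widgets"}}, // alternative 2: oauth2 with two scopes
}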
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"encoding/json"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
|
@ -50,6 +52,10 @@ func (p *Parameter) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (p *Parameter) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, p)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &p.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -63,6 +69,22 @@ func (p *Parameter) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ParameterProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
p.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
p.ParameterProps = x.ParameterProps
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject
|
||||
type ParameterProps struct {
|
||||
// Name holds the name of the parameter
|
||||
|
|
|
@ -18,10 +18,13 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject
|
||||
|
@ -45,6 +48,9 @@ func (p *Paths) MarshalJSON() ([]byte, error) {
|
|||
|
||||
// UnmarshalJSON hydrates this items instance with the data from JSON
|
||||
func (p *Paths) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, p)
|
||||
}
|
||||
var res map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &res); err != nil {
|
||||
return err
|
||||
|
@ -74,6 +80,59 @@ func (p *Paths) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
tok, err := dec.ReadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch k := tok.Kind(); k {
|
||||
case 'n':
|
||||
*p = Paths{}
|
||||
return nil
|
||||
case '{':
|
||||
for {
|
||||
tok, err := dec.ReadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tok.Kind() == '}' {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch k := tok.String(); {
|
||||
case internal.IsExtensionKey(k):
|
||||
var ext any
|
||||
if err := opts.UnmarshalNext(dec, &ext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.Extensions == nil {
|
||||
p.Extensions = make(map[string]any)
|
||||
}
|
||||
p.Extensions[k] = ext
|
||||
case len(k) > 0 && k[0] == '/':
|
||||
pi := Path{}
|
||||
if err := opts.UnmarshalNext(dec, &pi); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.Paths == nil {
|
||||
p.Paths = make(map[string]*Path)
|
||||
}
|
||||
p.Paths[k] = &pi
|
||||
default:
|
||||
_, err := dec.ReadValue() // skip value
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown JSON kind: %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
// Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject
|
||||
//
|
||||
// Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible
|
||||
|
@ -101,6 +160,9 @@ func (p *Path) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (p *Path) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, p)
|
||||
}
|
||||
if err := json.Unmarshal(data, &p.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -113,6 +175,24 @@ func (p *Path) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
PathProps
|
||||
}
|
||||
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
p.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
p.PathProps = x.PathProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject
|
||||
type PathProps struct {
|
||||
// Summary holds a summary for all operations in this path
|
||||
|
|
|
@ -19,8 +19,10 @@ package spec3
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject
|
||||
|
@ -50,6 +52,9 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (r *RequestBody) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, r)
|
||||
}
|
||||
if err := json.Unmarshal(data, &r.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -71,3 +76,19 @@ type RequestBodyProps struct {
|
|||
// Required determines if the request body is required in the request
|
||||
Required bool `json:"required,omitempty"`
|
||||
}
|
||||
|
||||
func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
RequestBodyProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
r.RequestBodyProps = x.RequestBodyProps
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -18,10 +18,13 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
// Responses holds the list of possible responses as they are returned from executing this operation
|
||||
|
@ -46,13 +49,15 @@ func (r *Responses) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (r *Responses) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, r)
|
||||
}
|
||||
if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -78,25 +83,91 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) {
|
|||
|
||||
// UnmarshalJSON unmarshals responses from JSON
|
||||
func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
|
||||
var res map[string]*Response
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, r)
|
||||
}
|
||||
var res map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &res); err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
if v, ok := res["default"]; ok {
|
||||
r.Default = v
|
||||
value := Response{}
|
||||
if err := json.Unmarshal(v, &value); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Default = &value
|
||||
delete(res, "default")
|
||||
}
|
||||
for k, v := range res {
|
||||
// Take all integral keys
|
||||
if nk, err := strconv.Atoi(k); err == nil {
|
||||
if r.StatusCodeResponses == nil {
|
||||
r.StatusCodeResponses = map[int]*Response{}
|
||||
}
|
||||
r.StatusCodeResponses[nk] = v
|
||||
value := Response{}
|
||||
if err := json.Unmarshal(v, &value); err != nil {
|
||||
return err
|
||||
}
|
||||
r.StatusCodeResponses[nk] = &value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) {
|
||||
tok, err := dec.ReadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch k := tok.Kind(); k {
|
||||
case 'n':
|
||||
*r = Responses{}
|
||||
return nil
|
||||
case '{':
|
||||
for {
|
||||
tok, err := dec.ReadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tok.Kind() == '}' {
|
||||
return nil
|
||||
}
|
||||
switch k := tok.String(); {
|
||||
case internal.IsExtensionKey(k):
|
||||
var ext any
|
||||
if err := opts.UnmarshalNext(dec, &ext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Extensions == nil {
|
||||
r.Extensions = make(map[string]any)
|
||||
}
|
||||
r.Extensions[k] = ext
|
||||
case k == "default":
|
||||
resp := Response{}
|
||||
if err := opts.UnmarshalNext(dec, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
r.ResponsesProps.Default = &resp
|
||||
default:
|
||||
if nk, err := strconv.Atoi(k); err == nil {
|
||||
resp := Response{}
|
||||
if err := opts.UnmarshalNext(dec, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.StatusCodeResponses == nil {
|
||||
r.StatusCodeResponses = map[int]*Response{}
|
||||
}
|
||||
r.StatusCodeResponses[nk] = &resp
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown JSON kind: %v", k)
|
||||
}
|
||||
}
|
||||
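The two hunks above fix ResponsesProps.UnmarshalJSON (decode into json.RawMessage and propagate errors instead of swallowing them) and add the jsonv2 decoding path. A small sketch of the kind of payload this now round-trips, assuming the spec3 package layout shown in the diff:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/spec3"
)

func main() {
	data := []byte(`{"default": {"description": "unexpected error"}, "200": {"description": "OK"}}`)

	var r spec3.Responses
	if err := json.Unmarshal(data, &r); err != nil {
		panic(err)
	}
	// "default" lands in Default, integral keys land in StatusCodeResponses.
	fmt.Println(r.Default.Description, r.StatusCodeResponses[200].Description)
}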
|
||||
// Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject
|
||||
//
|
||||
// Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible
|
||||
|
@ -124,6 +195,9 @@ func (r *Response) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (r *Response) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, r)
|
||||
}
|
||||
if err := json.Unmarshal(data, &r.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -133,7 +207,22 @@ func (r *Response) UnmarshalJSON(data []byte) error {
|
|||
if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ResponseProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
r.ResponseProps = x.ResponseProps
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -149,7 +238,6 @@ type ResponseProps struct {
|
|||
Links map[string]*Link `json:"links,omitempty"`
|
||||
}
|
||||
|
||||
|
||||
// Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object
|
||||
type Link struct {
|
||||
spec.Refable
|
||||
|
@ -175,6 +263,9 @@ func (r *Link) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (r *Link) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, r)
|
||||
}
|
||||
if err := json.Unmarshal(data, &r.Refable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -188,6 +279,22 @@ func (r *Link) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (l *Link) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
LinkProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
l.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
l.LinkProps = x.LinkProps
|
||||
return nil
|
||||
}
|
||||
|
||||
// LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject
|
||||
type LinkProps struct {
|
||||
// OperationId is the name of an existing, resolvable OAS operation
|
||||
|
|
|
@ -1,56 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spec3

import (
"encoding/json"

"k8s.io/kube-openapi/pkg/validation/spec"
"github.com/go-openapi/swag"
)

// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object
//
// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible
type SecurityRequirement struct {
SecurityRequirementProps
spec.VendorExtensible
}

// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON
func (s *SecurityRequirement) MarshalJSON() ([]byte, error) {
b1, err := json.Marshal(s.SecurityRequirementProps)
if err != nil {
return nil, err
}
b2, err := json.Marshal(s.VendorExtensible)
if err != nil {
return nil, err
}
return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON hydrates this items instance with the data from JSON
func (s *SecurityRequirement) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil {
return err
}
return json.Unmarshal(data, &s.VendorExtensible)
}

// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object
type SecurityRequirementProps map[string][]string

@ -19,8 +19,8 @@ package spec3
import (
"encoding/json"

"k8s.io/kube-openapi/pkg/validation/spec"
"github.com/go-openapi/swag"
"k8s.io/kube-openapi/pkg/validation/spec"
)

// SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject

@ -18,9 +18,11 @@ package spec3
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
|
@ -51,6 +53,10 @@ func (s *Server) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (s *Server) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, s)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &s.ServerProps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -60,6 +66,20 @@ func (s *Server) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ServerProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
s.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
s.ServerProps = x.ServerProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerVariable struct {
|
||||
ServerVariableProps
|
||||
spec.VendorExtensible
|
||||
|
@ -88,6 +108,9 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
func (s *ServerVariable) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, s)
|
||||
}
|
||||
if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -96,3 +119,17 @@ func (s *ServerVariable) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
|
||||
var x struct {
|
||||
spec.Extensions
|
||||
ServerVariableProps
|
||||
}
|
||||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
s.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
s.ServerVariableProps = x.ServerVariableProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,6 +17,10 @@ limitations under the License.
|
|||
package spec3
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
|
@ -35,3 +39,12 @@ type OpenAPI struct {
|
|||
// ExternalDocs holds additional external documentation
|
||||
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
|
||||
}
|
||||
|
||||
func (o *OpenAPI) UnmarshalJSON(data []byte) error {
|
||||
type OpenAPIWithNoFunctions OpenAPI
|
||||
p := (*OpenAPIWithNoFunctions)(o)
|
||||
if internal.UseOptimizedJSONUnmarshalingV3 {
|
||||
return jsonv2.Unmarshal(data, &p)
|
||||
}
|
||||
return json.Unmarshal(data, &p)
|
||||
}
|
||||
|
|
|
@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e
switch s.GetType() {
case object:
for _, extension := range s.GetSpecificationExtension() {
if extension.Name == "x-kuberentes-group-version-kind" {
if extension.Name == "x-kubernetes-group-version-kind" {
// Objects with x-kubernetes-group-version-kind are always top
// level types.
return d.parseV3Kind(s, path)

@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) {

func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) {
if s == nil {
return nil, fmt.Errorf("cannot initializae BaseSchema from nil")
return nil, fmt.Errorf("cannot initialize BaseSchema from nil")
}

def, err := parseV3Interface(s.GetDefault().ToRawInfo())

@ -17,7 +17,6 @@ limitations under the License.
package testing

import (
"io/ioutil"
"os"
"sync"

@ -42,7 +41,7 @@ func (f *Fake) OpenAPISchema() (*openapi_v2.Document, error) {
f.err = err
return
}
spec, err := ioutil.ReadFile(f.Path)
spec, err := os.ReadFile(f.Path)
if err != nil {
f.err = err
return

@ -17,7 +17,6 @@ limitations under the License.
package testing

import (
"io/ioutil"
"os"
"path/filepath"
"sync"

@ -47,7 +46,7 @@ func (f *FakeV3) OpenAPIV3Schema(groupVersion string) (*openapi_v3.Document, err
if err != nil {
return nil, err
}
spec, err := ioutil.ReadFile(filepath.Join(f.Path, groupVersion+".json"))
spec, err := os.ReadFile(filepath.Join(f.Path, groupVersion+".json"))
if err != nil {
return nil, err
}

@ -30,6 +30,7 @@ import (
|
|||
// in GetCanonicalTypeName means Go type names with full package path.
|
||||
//
|
||||
// Examples of REST friendly OpenAPI name:
|
||||
//
|
||||
// Input: k8s.io/api/core/v1.Pod
|
||||
// Output: io.k8s.api.core.v1.Pod
|
||||
//
|
||||
|
@ -45,6 +46,7 @@ func ToCanonicalName(name string) string {
|
|||
// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name.
|
||||
//
|
||||
// Examples of REST friendly OpenAPI name:
|
||||
//
|
||||
// Input: k8s.io/api/core/v1.Pod
|
||||
// Output: io.k8s.api.core.v1.Pod
|
||||
//
|
||||
|
@ -71,18 +73,21 @@ func ToRESTFriendlyName(name string) string {
|
|||
// OpenAPI canonical names are Go type names with full package path, for uniquely indentifying
|
||||
// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/"
|
||||
// should be used. For custom resource definition (CRD), the canonical name is expected to be
|
||||
// group/version.kind
|
||||
//
|
||||
// group/version.kind
|
||||
//
|
||||
// Examples of canonical name:
|
||||
// Go type: k8s.io/kubernetes/pkg/apis/core.Pod
|
||||
// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo
|
||||
//
|
||||
// Go type: k8s.io/kubernetes/pkg/apis/core.Pod
|
||||
// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo
|
||||
//
|
||||
// Example for vendored Go type:
|
||||
// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod
|
||||
// Canonical name: k8s.io/api/core/v1.Pod
|
||||
//
|
||||
// Original full path: vendor/k8s.io/api/core/v1.Pod
|
||||
// Canonical name: k8s.io/api/core/v1.Pod
|
||||
// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod
|
||||
// Canonical name: k8s.io/api/core/v1.Pod
|
||||
//
|
||||
// Original full path: vendor/k8s.io/api/core/v1.Pod
|
||||
// Canonical name: k8s.io/api/core/v1.Pod
|
||||
type OpenAPICanonicalTypeNamer interface {
|
||||
OpenAPICanonicalTypeName() string
|
||||
}
|
||||
|
|
|
@ -43,6 +43,9 @@ type Header struct {
|
|||
|
||||
// MarshalJSON marshal this to JSON
|
||||
func (h Header) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(h)
|
||||
}
|
||||
b1, err := json.Marshal(h.CommonValidations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -62,6 +65,20 @@ func (h Header) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2, b3, b4), nil
|
||||
}
|
||||
|
||||
func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
CommonValidations commonValidationsOmitZero `json:",inline"`
|
||||
SimpleSchema simpleSchemaOmitZero `json:",inline"`
|
||||
Extensions
|
||||
HeaderProps
|
||||
}
|
||||
x.CommonValidations = commonValidationsOmitZero(h.CommonValidations)
|
||||
x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema)
|
||||
x.Extensions = internal.SanitizeExtensions(h.Extensions)
|
||||
x.HeaderProps = h.HeaderProps
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals this header from JSON
|
||||
func (h *Header) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
@ -94,12 +111,8 @@ func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec
|
|||
|
||||
h.CommonValidations = x.CommonValidations
|
||||
h.SimpleSchema = x.SimpleSchema
|
||||
h.Extensions = x.Extensions
|
||||
h.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
h.HeaderProps = x.HeaderProps
|
||||
|
||||
h.Extensions.sanitize()
|
||||
if len(h.Extensions) == 0 {
|
||||
h.Extensions = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -89,17 +89,9 @@ func (e Extensions) GetObject(key string, out interface{}) error {
return nil
}

func (e Extensions) sanitize() {
for k := range e {
if !isExtensionKey(k) {
delete(e, k)
}
}
}

func (e Extensions) sanitizeWithExtra() (extra map[string]any) {
for k, v := range e {
if !isExtensionKey(k) {
if !internal.IsExtensionKey(k) {
if extra == nil {
extra = make(map[string]any)
}

@ -110,10 +102,6 @@ func (e Extensions) sanitizeWithExtra() (extra map[string]any) {
return extra
}

func isExtensionKey(k string) bool {
return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-'
}
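
The hunk above moves isExtensionKey and Extensions.sanitize behind the shared internal helpers (internal.IsExtensionKey, internal.SanitizeExtensions); the behaviour is unchanged. A standalone sketch that mirrors what those helpers do, for readers following the refactor (illustrative code, not the vendored implementation):

package main

import "fmt"

// isExtensionKey mirrors the removed helper: extension keys start with "x-" or "X-".
func isExtensionKey(k string) bool {
	return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-'
}

// sanitizeExtensions keeps only extension keys and returns nil when nothing is left,
// matching the "len == 0 -> nil" pattern the callers used to spell out by hand.
func sanitizeExtensions(e map[string]interface{}) map[string]interface{} {
	for k := range e {
		if !isExtensionKey(k) {
			delete(e, k)
		}
	}
	if len(e) == 0 {
		return nil
	}
	return e
}

func main() {
	ext := map[string]interface{}{
		"x-kubernetes-group-version-kind": "apps/v1.Deployment",
		"description":                     "not an extension, dropped",
	}
	fmt.Println(sanitizeExtensions(ext))
}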
|
||||
// VendorExtensible composition block.
|
||||
type VendorExtensible struct {
|
||||
Extensions Extensions
|
||||
|
@ -181,6 +169,9 @@ type Info struct {
|
|||
|
||||
// MarshalJSON marshal this to JSON
|
||||
func (i Info) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(i)
|
||||
}
|
||||
b1, err := json.Marshal(i.InfoProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -192,6 +183,16 @@ func (i Info) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2), nil
|
||||
}
|
||||
|
||||
func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Extensions
|
||||
InfoProps
|
||||
}
|
||||
x.Extensions = i.Extensions
|
||||
x.InfoProps = i.InfoProps
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// UnmarshalJSON marshal this from JSON
|
||||
func (i *Info) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
@ -212,11 +213,7 @@ func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decod
|
|||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
x.Extensions.sanitize()
|
||||
if len(x.Extensions) == 0 {
|
||||
x.Extensions = nil
|
||||
}
|
||||
i.VendorExtensible.Extensions = x.Extensions
|
||||
i.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
i.InfoProps = x.InfoProps
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -37,6 +37,18 @@ type SimpleSchema struct {
|
|||
Example interface{} `json:"example,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type simpleSchemaOmitZero struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Nullable bool `json:"nullable,omitzero"`
|
||||
Format string `json:"format,omitempty"`
|
||||
Items *Items `json:"items,omitzero"`
|
||||
CollectionFormat string `json:"collectionFormat,omitempty"`
|
||||
Default interface{} `json:"default,omitempty"`
|
||||
Example interface{} `json:"example,omitempty"`
|
||||
}
|
||||
|
||||
// CommonValidations describe common JSON-schema validations
|
||||
type CommonValidations struct {
|
||||
Maximum *float64 `json:"maximum,omitempty"`
|
||||
|
@ -53,6 +65,23 @@ type CommonValidations struct {
|
|||
Enum []interface{} `json:"enum,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type commonValidationsOmitZero struct {
|
||||
Maximum *float64 `json:"maximum,omitempty"`
|
||||
ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"`
|
||||
Minimum *float64 `json:"minimum,omitempty"`
|
||||
ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"`
|
||||
MaxLength *int64 `json:"maxLength,omitempty"`
|
||||
MinLength *int64 `json:"minLength,omitempty"`
|
||||
Pattern string `json:"pattern,omitempty"`
|
||||
MaxItems *int64 `json:"maxItems,omitempty"`
|
||||
MinItems *int64 `json:"minItems,omitempty"`
|
||||
UniqueItems bool `json:"uniqueItems,omitzero"`
|
||||
MultipleOf *float64 `json:"multipleOf,omitempty"`
|
||||
Enum []interface{} `json:"enum,omitempty"`
|
||||
}
|
||||
|
||||
// Items a limited subset of JSON-Schema's items object.
|
||||
// It is used by parameter definitions that are not located in "body".
|
||||
//
|
||||
|
@ -105,18 +134,18 @@ func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco
|
|||
if err := i.Refable.Ref.fromMap(x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
x.Extensions.sanitize()
|
||||
if len(x.Extensions) == 0 {
|
||||
x.Extensions = nil
|
||||
}
|
||||
|
||||
i.CommonValidations = x.CommonValidations
|
||||
i.SimpleSchema = x.SimpleSchema
|
||||
i.VendorExtensible.Extensions = x.Extensions
|
||||
i.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (i Items) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(i)
|
||||
}
|
||||
b1, err := json.Marshal(i.CommonValidations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -135,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
return swag.ConcatJSON(b4, b3, b1, b2), nil
|
||||
}
|
||||
|
||||
func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
CommonValidations commonValidationsOmitZero `json:",inline"`
|
||||
SimpleSchema simpleSchemaOmitZero `json:",inline"`
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
Extensions
|
||||
}
|
||||
x.CommonValidations = commonValidationsOmitZero(i.CommonValidations)
|
||||
x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema)
|
||||
x.Ref = i.Refable.Ref.String()
|
||||
x.Extensions = internal.SanitizeExtensions(i.Extensions)
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
|
|
@ -42,6 +42,23 @@ type OperationProps struct {
|
|||
Responses *Responses `json:"responses,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type operationPropsOmitZero struct {
|
||||
Description string `json:"description,omitempty"`
|
||||
Consumes []string `json:"consumes,omitempty"`
|
||||
Produces []string `json:"produces,omitempty"`
|
||||
Schemes []string `json:"schemes,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
Summary string `json:"summary,omitempty"`
|
||||
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"`
|
||||
ID string `json:"operationId,omitempty"`
|
||||
Deprecated bool `json:"deprecated,omitempty,omitzero"`
|
||||
Security []map[string][]string `json:"security,omitempty"`
|
||||
Parameters []Parameter `json:"parameters,omitempty"`
|
||||
Responses *Responses `json:"responses,omitzero"`
|
||||
}
|
||||
|
||||
// MarshalJSON takes care of serializing operation properties to JSON
|
||||
//
|
||||
// We use a custom marhaller here to handle a special cases related to
|
||||
|
@ -96,17 +113,16 @@ func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.
|
|||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
x.Extensions.sanitize()
|
||||
if len(x.Extensions) == 0 {
|
||||
x.Extensions = nil
|
||||
}
|
||||
o.VendorExtensible.Extensions = x.Extensions
|
||||
o.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
o.OperationProps = OperationProps(x.OperationPropsNoMethods)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (o Operation) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(o)
|
||||
}
|
||||
b1, err := json.Marshal(o.OperationProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -118,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) {
|
|||
concated := swag.ConcatJSON(b1, b2)
|
||||
return concated, nil
|
||||
}
|
||||
|
||||
func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Extensions
|
||||
OperationProps operationPropsOmitZero `json:",inline"`
|
||||
}
|
||||
x.Extensions = internal.SanitizeExtensions(o.Extensions)
|
||||
x.OperationProps = operationPropsOmitZero(o.OperationProps)
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
|
|
@ -36,6 +36,17 @@ type ParamProps struct {
|
|||
AllowEmptyValue bool `json:"allowEmptyValue,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type paramPropsOmitZero struct {
|
||||
Description string `json:"description,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
In string `json:"in,omitempty"`
|
||||
Required bool `json:"required,omitzero"`
|
||||
Schema *Schema `json:"schema,omitzero"`
|
||||
AllowEmptyValue bool `json:"allowEmptyValue,omitzero"`
|
||||
}
|
||||
|
||||
// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
|
||||
//
|
||||
// There are five possible parameter types.
|
||||
|
@ -109,19 +120,18 @@ func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.
|
|||
if err := p.Refable.Ref.fromMap(x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
x.Extensions.sanitize()
|
||||
if len(x.Extensions) == 0 {
|
||||
x.Extensions = nil
|
||||
}
|
||||
p.CommonValidations = x.CommonValidations
|
||||
p.SimpleSchema = x.SimpleSchema
|
||||
p.VendorExtensible.Extensions = x.Extensions
|
||||
p.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
p.ParamProps = x.ParamProps
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (p Parameter) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(p)
|
||||
}
|
||||
b1, err := json.Marshal(p.CommonValidations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -144,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
|
||||
}
|
||||
|
||||
func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
CommonValidations commonValidationsOmitZero `json:",inline"`
|
||||
SimpleSchema simpleSchemaOmitZero `json:",inline"`
|
||||
ParamProps paramPropsOmitZero `json:",inline"`
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
Extensions
|
||||
}
|
||||
x.CommonValidations = commonValidationsOmitZero(p.CommonValidations)
|
||||
x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema)
|
||||
x.Extensions = internal.SanitizeExtensions(p.Extensions)
|
||||
x.ParamProps = paramPropsOmitZero(p.ParamProps)
|
||||
x.Ref = p.Refable.Ref.String()
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
|
|
@ -70,24 +70,20 @@ func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D
|
|||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.Extensions = x.Extensions
|
||||
p.PathItemProps = x.PathItemProps
|
||||
|
||||
if err := p.Refable.Ref.fromMap(p.Extensions); err != nil {
|
||||
if err := p.Refable.Ref.fromMap(x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.Extensions.sanitize()
|
||||
if len(p.Extensions) == 0 {
|
||||
p.Extensions = nil
|
||||
}
|
||||
p.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
p.PathItemProps = x.PathItemProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (p PathItem) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(p)
|
||||
}
|
||||
b3, err := json.Marshal(p.Refable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -103,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) {
|
|||
concated := swag.ConcatJSON(b3, b4, b5)
|
||||
return concated, nil
|
||||
}
|
||||
|
||||
func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
Extensions
|
||||
PathItemProps
|
||||
}
|
||||
x.Ref = p.Refable.Ref.String()
|
||||
x.Extensions = internal.SanitizeExtensions(p.Extensions)
|
||||
x.PathItemProps = p.PathItemProps
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
|
|
@ -92,7 +92,7 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco
|
|||
}
|
||||
|
||||
switch k := tok.String(); {
|
||||
case isExtensionKey(k):
|
||||
case internal.IsExtensionKey(k):
|
||||
ext = nil
|
||||
if err := opts.UnmarshalNext(dec, &ext); err != nil {
|
||||
return err
|
||||
|
@ -114,7 +114,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco
|
|||
p.Paths[k] = pi
|
||||
default:
|
||||
_, err := dec.ReadValue() // skip value
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
@ -124,6 +126,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco
|
|||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (p Paths) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(p)
|
||||
}
|
||||
b1, err := json.Marshal(p.VendorExtensible)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -142,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) {
|
|||
concated := swag.ConcatJSON(b1, b2)
|
||||
return concated, nil
|
||||
}
|
||||
|
||||
func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
m := make(map[string]any, len(p.Extensions)+len(p.Paths))
|
||||
for k, v := range p.Extensions {
|
||||
if internal.IsExtensionKey(k) {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
for k, v := range p.Paths {
|
||||
if strings.HasPrefix(k, "/") {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
return opts.MarshalNext(enc, m)
|
||||
}
|
||||
|
|
|
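Paths.MarshalNextJSON above flattens the value into a single map, keeping extension keys and any key that starts with "/". Observed through the public MarshalJSON entry point, the effect looks roughly like this (a sketch; key order in the output is not guaranteed):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	p := spec.Paths{
		VendorExtensible: spec.VendorExtensible{
			Extensions: spec.Extensions{"x-framework": "kube-openapi"},
		},
		Paths: map[string]spec.PathItem{
			"/healthz": {},
		},
	}
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // e.g. {"/healthz":{},"x-framework":"kube-openapi"}
}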
@ -21,6 +21,8 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/go-openapi/jsonreference"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
)
|
||||
|
||||
// Refable is a struct for things that accept a $ref property
|
||||
|
@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error {
|
|||
}
|
||||
|
||||
func (r *Ref) fromMap(v map[string]interface{}) error {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if vv, ok := v["$ref"]; ok {
|
||||
if str, ok := vv.(string); ok {
|
||||
ref, err := jsonreference.New(str)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*r = Ref{Ref: ref}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return internal.JSONRefFromMap(&r.Ref, v)
|
||||
}
|
||||
|
|
|
@ -30,6 +30,15 @@ type ResponseProps struct {
|
|||
Examples map[string]interface{} `json:"examples,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type responsePropsOmitZero struct {
|
||||
Description string `json:"description,omitempty"`
|
||||
Schema *Schema `json:"schema,omitzero"`
|
||||
Headers map[string]Header `json:"headers,omitempty"`
|
||||
Examples map[string]interface{} `json:"examples,omitempty"`
|
||||
}
|
||||
|
||||
// Response describes a single response from an API Operation.
|
||||
//
|
||||
// For more information: http://goo.gl/8us55a#responseObject
|
||||
|
@ -68,23 +77,20 @@ func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D
|
|||
return err
|
||||
}
|
||||
|
||||
r.Extensions = x.Extensions
|
||||
r.ResponseProps = x.ResponseProps
|
||||
|
||||
if err := r.Refable.Ref.fromMap(r.Extensions); err != nil {
|
||||
if err := r.Refable.Ref.fromMap(x.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Extensions.sanitize()
|
||||
if len(r.Extensions) == 0 {
|
||||
r.Extensions = nil
|
||||
}
|
||||
r.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
r.ResponseProps = x.ResponseProps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (r Response) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(r)
|
||||
}
|
||||
b1, err := json.Marshal(r.ResponseProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -100,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2, b3), nil
|
||||
}
|
||||
|
||||
func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
Extensions
|
||||
ResponseProps responsePropsOmitZero `json:",inline"`
|
||||
}
|
||||
x.Ref = r.Refable.Ref.String()
|
||||
x.Extensions = internal.SanitizeExtensions(r.Extensions)
|
||||
x.ResponseProps = responsePropsOmitZero(r.ResponseProps)
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// NewResponse creates a new response instance
|
||||
func NewResponse() *Response {
|
||||
return new(Response)
|
||||
|
|
|
@ -63,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error {
|
|||
|
||||
// MarshalJSON converts this items object to JSON
|
||||
func (r Responses) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(r)
|
||||
}
|
||||
b1, err := json.Marshal(r.ResponsesProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -75,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) {
|
|||
return concated, nil
|
||||
}
|
||||
|
||||
func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
type ArbitraryKeys map[string]interface{}
|
||||
var x struct {
|
||||
ArbitraryKeys
|
||||
Default *Response `json:"default,omitempty"`
|
||||
}
|
||||
x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses))
|
||||
for k, v := range r.Extensions {
|
||||
if internal.IsExtensionKey(k) {
|
||||
x.ArbitraryKeys[k] = v
|
||||
}
|
||||
}
|
||||
for k, v := range r.StatusCodeResponses {
|
||||
x.ArbitraryKeys[strconv.Itoa(k)] = v
|
||||
}
|
||||
x.Default = r.Default
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// ResponsesProps describes all responses for an operation.
|
||||
// It tells what is the default response and maps all responses with a
|
||||
// HTTP status code.
|
||||
|
@ -148,7 +170,7 @@ func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.
|
|||
return nil
|
||||
}
|
||||
switch k := tok.String(); {
|
||||
case isExtensionKey(k):
|
||||
case internal.IsExtensionKey(k):
|
||||
ext = nil
|
||||
if err := opts.UnmarshalNext(dec, &ext); err != nil {
|
||||
return err
|
||||
|
|
|
@ -196,6 +196,46 @@ type SchemaProps struct {
|
|||
Definitions Definitions `json:"definitions,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type schemaPropsOmitZero struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
Ref Ref `json:"-"`
|
||||
Schema SchemaURL `json:"-"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Type StringOrArray `json:"type,omitzero"`
|
||||
Nullable bool `json:"nullable,omitzero"`
|
||||
Format string `json:"format,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
Default interface{} `json:"default,omitzero"`
|
||||
Maximum *float64 `json:"maximum,omitempty"`
|
||||
ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"`
|
||||
Minimum *float64 `json:"minimum,omitempty"`
|
||||
ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"`
|
||||
MaxLength *int64 `json:"maxLength,omitempty"`
|
||||
MinLength *int64 `json:"minLength,omitempty"`
|
||||
Pattern string `json:"pattern,omitempty"`
|
||||
MaxItems *int64 `json:"maxItems,omitempty"`
|
||||
MinItems *int64 `json:"minItems,omitempty"`
|
||||
UniqueItems bool `json:"uniqueItems,omitzero"`
|
||||
MultipleOf *float64 `json:"multipleOf,omitempty"`
|
||||
Enum []interface{} `json:"enum,omitempty"`
|
||||
MaxProperties *int64 `json:"maxProperties,omitempty"`
|
||||
MinProperties *int64 `json:"minProperties,omitempty"`
|
||||
Required []string `json:"required,omitempty"`
|
||||
Items *SchemaOrArray `json:"items,omitzero"`
|
||||
AllOf []Schema `json:"allOf,omitempty"`
|
||||
OneOf []Schema `json:"oneOf,omitempty"`
|
||||
AnyOf []Schema `json:"anyOf,omitempty"`
|
||||
Not *Schema `json:"not,omitzero"`
|
||||
Properties map[string]Schema `json:"properties,omitempty"`
|
||||
AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"`
|
||||
PatternProperties map[string]Schema `json:"patternProperties,omitempty"`
|
||||
Dependencies Dependencies `json:"dependencies,omitempty"`
|
||||
AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"`
|
||||
Definitions Definitions `json:"definitions,omitempty"`
|
||||
}
|
||||
|
||||
// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4)
|
||||
type SwaggerSchemaProps struct {
|
||||
Discriminator string `json:"discriminator,omitempty"`
|
||||
|
@ -204,6 +244,15 @@ type SwaggerSchemaProps struct {
|
|||
Example interface{} `json:"example,omitempty"`
|
||||
}
|
||||
|
||||
// Marshaling structure only, always edit along with corresponding
|
||||
// struct (or compilation will fail).
|
||||
type swaggerSchemaPropsOmitZero struct {
|
||||
Discriminator string `json:"discriminator,omitempty"`
|
||||
ReadOnly bool `json:"readOnly,omitzero"`
|
||||
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"`
|
||||
Example interface{} `json:"example,omitempty"`
|
||||
}
|
||||
|
||||
// Schema the schema object allows the definition of input and output data types.
|
||||
// These types can be objects, but also primitives and arrays.
|
||||
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
|
||||
|
@ -434,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema {
|
|||
|
||||
// MarshalJSON marshal this to JSON
|
||||
func (s Schema) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
b1, err := json.Marshal(s.SchemaProps)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("schema props %v", err)
|
||||
|
@ -465,6 +517,31 @@ func (s Schema) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
|
||||
}
|
||||
|
||||
func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
type ArbitraryKeys map[string]interface{}
|
||||
var x struct {
|
||||
ArbitraryKeys
|
||||
SchemaProps schemaPropsOmitZero `json:",inline"`
|
||||
SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"`
|
||||
Schema string `json:"$schema,omitempty"`
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
}
|
||||
x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps))
|
||||
for k, v := range s.Extensions {
|
||||
if internal.IsExtensionKey(k) {
|
||||
x.ArbitraryKeys[k] = v
|
||||
}
|
||||
}
|
||||
for k, v := range s.ExtraProps {
|
||||
x.ArbitraryKeys[k] = v
|
||||
}
|
||||
x.SchemaProps = schemaPropsOmitZero(s.SchemaProps)
|
||||
x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps)
|
||||
x.Ref = s.Ref.String()
|
||||
x.Schema = string(s.Schema)
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// UnmarshalJSON marshal this from JSON
|
||||
func (s *Schema) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
@ -547,7 +624,7 @@ func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec
|
|||
}
|
||||
|
||||
s.ExtraProps = x.Extensions.sanitizeWithExtra()
|
||||
s.VendorExtensible.Extensions = x.Extensions
|
||||
s.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
s.SchemaProps = x.SchemaProps
|
||||
s.SwaggerSchemaProps = x.SwaggerSchemaProps
|
||||
return nil
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"encoding/json"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"k8s.io/kube-openapi/pkg/internal"
|
||||
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
|
||||
)
|
||||
|
||||
|
@ -45,6 +46,9 @@ type SecurityScheme struct {
|
|||
|
||||
// MarshalJSON marshal this to JSON
|
||||
func (s SecurityScheme) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
b1, err := json.Marshal(s.SecuritySchemeProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -56,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2), nil
|
||||
}
|
||||
|
||||
func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Extensions
|
||||
SecuritySchemeProps
|
||||
}
|
||||
x.Extensions = internal.SanitizeExtensions(s.Extensions)
|
||||
x.SecuritySchemeProps = s.SecuritySchemeProps
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// UnmarshalJSON marshal this from JSON
|
||||
func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
|
||||
if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
|
||||
|
@ -72,11 +86,7 @@ func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *js
|
|||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
x.Extensions.sanitize()
|
||||
if len(x.Extensions) == 0 {
|
||||
x.Extensions = nil
|
||||
}
|
||||
s.VendorExtensible.Extensions = x.Extensions
|
||||
s.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
s.SecuritySchemeProps = x.SecuritySchemeProps
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -35,6 +35,9 @@ type Swagger struct {
|
|||
|
||||
// MarshalJSON marshals this swagger structure to json
|
||||
func (s Swagger) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
b1, err := json.Marshal(s.SwaggerProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -46,12 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) {
|
|||
return swag.ConcatJSON(b1, b2), nil
|
||||
}
|
||||
|
||||
// MarshalJSON marshals this swagger structure to json
|
||||
func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
var x struct {
|
||||
Extensions
|
||||
SwaggerProps
|
||||
}
|
||||
x.Extensions = internal.SanitizeExtensions(s.Extensions)
|
||||
x.SwaggerProps = s.SwaggerProps
|
||||
return opts.MarshalNext(enc, x)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals a swagger spec from json
|
||||
func (s *Swagger) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
return jsonv2.Unmarshal(data, s)
|
||||
}
|
||||
|
||||
var sw Swagger
|
||||
if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
|
||||
return err
|
||||
|
@ -75,15 +88,8 @@ func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.De
|
|||
if err := opts.UnmarshalNext(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Extensions = x.Extensions
|
||||
s.Extensions = internal.SanitizeExtensions(x.Extensions)
|
||||
s.SwaggerProps = x.SwaggerProps
|
||||
|
||||
s.Extensions.sanitize()
|
||||
if len(s.Extensions) == 0 {
|
||||
s.Extensions = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -126,6 +132,9 @@ var jsFalse = []byte("false")
|
|||
|
||||
// MarshalJSON convert this object to JSON
|
||||
func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
if s.Schema != nil {
|
||||
return json.Marshal(s.Schema)
|
||||
}
|
||||
|
@ -136,6 +145,18 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
|
|||
return jsTrue, nil
|
||||
}
|
||||
|
||||
// MarshalJSON convert this object to JSON
|
||||
func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
if s.Schema != nil {
|
||||
return opts.MarshalNext(enc, s.Schema)
|
||||
}
|
||||
|
||||
if s.Schema == nil && !s.Allows {
|
||||
return enc.WriteToken(jsonv2.False)
|
||||
}
|
||||
return enc.WriteToken(jsonv2.True)
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts this bool or schema object from a JSON structure
|
||||
func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
@ -143,15 +164,15 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
var nw SchemaOrBool
|
||||
if len(data) >= 4 {
|
||||
if data[0] == '{' {
|
||||
var sch Schema
|
||||
if err := json.Unmarshal(data, &sch); err != nil {
|
||||
return err
|
||||
}
|
||||
nw.Schema = &sch
|
||||
if len(data) > 0 && data[0] == '{' {
|
||||
var sch Schema
|
||||
if err := json.Unmarshal(data, &sch); err != nil {
|
||||
return err
|
||||
}
|
||||
nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
|
||||
nw.Schema = &sch
|
||||
nw.Allows = true
|
||||
} else {
|
||||
json.Unmarshal(data, &nw.Allows)
|
||||
}
|
||||
*s = nw
|
||||
return nil
|
||||
|
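The rewritten branch above treats any payload that starts with '{' as a schema (setting Allows to true) and hands plain booleans to encoding/json, instead of peeking at the first four bytes. A quick sketch of the three inputs it is meant to accept (package path as in the vendored tree):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	for _, in := range []string{`true`, `false`, `{"type": "string"}`} {
		var s spec.SchemaOrBool
		if err := json.Unmarshal([]byte(in), &s); err != nil {
			panic(err)
		}
		fmt.Printf("input=%s allows=%v hasSchema=%v\n", in, s.Allows, s.Schema != nil)
	}
}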
@ -185,6 +206,9 @@ type SchemaOrStringArray struct {
|
|||
|
||||
// MarshalJSON converts this schema object or array into JSON structure
|
||||
func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
if len(s.Property) > 0 {
|
||||
return json.Marshal(s.Property)
|
||||
}
|
||||
|
@ -194,6 +218,17 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
|
|||
return []byte("null"), nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts this schema object or array into JSON structure
|
||||
func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
if len(s.Property) > 0 {
|
||||
return opts.MarshalNext(enc, s.Property)
|
||||
}
|
||||
if s.Schema != nil {
|
||||
return opts.MarshalNext(enc, s.Schema)
|
||||
}
|
||||
return enc.WriteToken(jsonv2.Null)
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts this schema object or array from a JSON structure
|
||||
func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
@ -347,12 +382,23 @@ func (s *SchemaOrArray) ContainsType(name string) bool {
|
|||
|
||||
// MarshalJSON converts this schema object or array into JSON structure
|
||||
func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
|
||||
if len(s.Schemas) > 0 {
|
||||
if internal.UseOptimizedJSONMarshaling {
|
||||
return internal.DeterministicMarshal(s)
|
||||
}
|
||||
if s.Schemas != nil {
|
||||
return json.Marshal(s.Schemas)
|
||||
}
|
||||
return json.Marshal(s.Schema)
|
||||
}
|
||||
|
||||
// MarshalJSON converts this schema object or array into JSON structure
|
||||
func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
|
||||
if s.Schemas != nil {
|
||||
return opts.MarshalNext(enc, s.Schemas)
|
||||
}
|
||||
return opts.MarshalNext(enc, s.Schema)
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts this schema object or array from a JSON structure
|
||||
func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
|
||||
if internal.UseOptimizedJSONUnmarshaling {
|
||||
|
|
|
@@ -41,6 +41,9 @@ type Tag struct {

// MarshalJSON marshal this to JSON
func (t Tag) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshaling {
return internal.DeterministicMarshal(t)
}
b1, err := json.Marshal(t.TagProps)
if err != nil {
return nil, err

@@ -52,6 +55,16 @@ func (t Tag) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}

func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Extensions
TagProps
}
x.Extensions = internal.SanitizeExtensions(t.Extensions)
x.TagProps = t.TagProps
return opts.MarshalNext(enc, x)
}

// UnmarshalJSON marshal this from JSON
func (t *Tag) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {

@@ -72,11 +85,7 @@ func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decode
if err := opts.UnmarshalNext(dec, &x); err != nil {
return err
}
x.Extensions.sanitize()
if len(x.Extensions) == 0 {
x.Extensions = nil
}
t.VendorExtensible.Extensions = x.Extensions
t.Extensions = internal.SanitizeExtensions(x.Extensions)
t.TagProps = x.TagProps
return nil
}

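Tag.MarshalNextJSON above flattens the tag's extensions and its regular properties into one JSON object by embedding both in an anonymous struct. The same flattening works with embedded structs in plain encoding/json; a small sketch with hypothetical property types (the real Tag also folds in an Extensions map, which is omitted here for simplicity):

// Hypothetical sketch of merging two embedded property sets into one flat
// JSON object, similar in spirit to how Tag combines its fields above.
package main

import (
	"encoding/json"
	"fmt"
)

type TagProps struct {
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
}

type DocProps struct {
	ExternalDocs string `json:"externalDocs,omitempty"`
}

func main() {
	var x struct {
		TagProps
		DocProps
	}
	x.Name = "pets"
	x.Description = "Everything about pets"
	x.ExternalDocs = "https://example.com/docs"

	out, err := json.Marshal(x)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// Prints one flat object:
	// {"name":"pets","description":"Everything about pets","externalDocs":"https://example.com/docs"}
	fmt.Println(string(out))
}
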
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,46 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package custom_metrics

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/conversion"
)

func Convert_v1_ObjectReference_To_custom_metrics_ObjectReference(in *v1.ObjectReference, out *ObjectReference, s conversion.Scope) error {
	out.APIVersion = in.APIVersion

	out.Kind = in.Kind
	out.Namespace = in.Namespace
	out.Name = in.Name
	out.UID = in.UID
	out.ResourceVersion = in.ResourceVersion
	out.FieldPath = in.FieldPath
	return nil
}

func Convert_custom_metrics_ObjectReference_To_v1_ObjectReference(in *ObjectReference, out *v1.ObjectReference, s conversion.Scope) error {
	out.APIVersion = in.APIVersion

	out.Kind = in.Kind
	out.Namespace = in.Namespace
	out.Name = in.Name
	out.UID = in.UID
	out.ResourceVersion = in.ResourceVersion
	out.FieldPath = in.FieldPath
	return nil
}

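For orientation, a small sketch of calling these generated helpers directly; the k8s.io/metrics/pkg/apis/custom_metrics import path is an assumption about where the vendored package lives, and the nil conversion.Scope is acceptable because neither helper reads it:

// Illustrative only: converts a core/v1 ObjectReference describing a scaled
// workload into the custom_metrics ObjectReference handled above.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/metrics/pkg/apis/custom_metrics"
)

func main() {
	src := v1.ObjectReference{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Namespace:  "default",
		Name:       "sample-app",
	}

	var dst custom_metrics.ObjectReference
	if err := custom_metrics.Convert_v1_ObjectReference_To_custom_metrics_ObjectReference(&src, &dst, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("%s %s/%s (%s)\n", dst.Kind, dst.Namespace, dst.Name, dst.APIVersion)
}
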
Some files were not shown because too many files have changed in this diff.