bump k8s to v1.34 libs

Dave Protasowski 2025-11-03 13:10:11 -05:00
parent c5e9118d59
commit 6bbf09f0f4
663 changed files with 61637 additions and 25404 deletions

View File

@ -1,40 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "knative.dev/pkg/client/injection/kube/informers/factory/fake"
ipaddress "knative.dev/pkg/client/injection/kube/informers/networking/v1alpha1/ipaddress"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = ipaddress.Get
func init() {
injection.Fake.RegisterInformer(withInformer)
}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Networking().V1alpha1().IPAddresses()
return context.WithValue(ctx, ipaddress.Key{}, inf), inf.Informer()
}

View File

@ -1,52 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
filtered "knative.dev/pkg/client/injection/kube/informers/networking/v1alpha1/ipaddress/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
var Get = filtered.Get
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Networking().V1alpha1().IPAddresses()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}

View File

@ -1,65 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
filtered "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
Selector string
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Networking().V1alpha1().IPAddresses()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.IPAddressInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
logging.FromContext(ctx).Panicf(
"Unable to fetch k8s.io/client-go/informers/networking/v1alpha1.IPAddressInformer with selector %s from context.", selector)
}
return untyped.(v1alpha1.IPAddressInformer)
}

View File

@ -1,52 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package ipaddress
import (
context "context"
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
factory "knative.dev/pkg/client/injection/kube/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Networking().V1alpha1().IPAddresses()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1alpha1.IPAddressInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch k8s.io/client-go/informers/networking/v1alpha1.IPAddressInformer from context.")
}
return untyped.(v1alpha1.IPAddressInformer)
}

View File

@ -1,40 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "knative.dev/pkg/client/injection/kube/informers/factory/fake"
servicecidr "knative.dev/pkg/client/injection/kube/informers/networking/v1alpha1/servicecidr"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = servicecidr.Get
func init() {
injection.Fake.RegisterInformer(withInformer)
}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Networking().V1alpha1().ServiceCIDRs()
return context.WithValue(ctx, servicecidr.Key{}, inf), inf.Informer()
}

View File

@ -1,52 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
filtered "knative.dev/pkg/client/injection/kube/informers/networking/v1alpha1/servicecidr/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
var Get = filtered.Get
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Networking().V1alpha1().ServiceCIDRs()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}

View File

@ -1,65 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
filtered "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
Selector string
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Networking().V1alpha1().ServiceCIDRs()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.ServiceCIDRInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
logging.FromContext(ctx).Panicf(
"Unable to fetch k8s.io/client-go/informers/networking/v1alpha1.ServiceCIDRInformer with selector %s from context.", selector)
}
return untyped.(v1alpha1.ServiceCIDRInformer)
}

View File

@ -1,52 +0,0 @@
/*
Copyright 2022 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package servicecidr
import (
context "context"
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
factory "knative.dev/pkg/client/injection/kube/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Networking().V1alpha1().ServiceCIDRs()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1alpha1.ServiceCIDRInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch k8s.io/client-go/informers/networking/v1alpha1.ServiceCIDRInformer from context.")
}
return untyped.(v1alpha1.ServiceCIDRInformer)
}
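The generated injection helpers above (all deleted by this bump, since the networking v1alpha1 informers are no longer generated) follow the standard knative.dev/pkg injection pattern: an init-registered withInformer stores the informer in the context at startup, and Get panics if it was never registered. A minimal sketch of how such a Get helper is typically consumed in a controller constructor; the package name and usage are illustrative, and this particular import path no longer exists after this commit:

```go
package sample

import (
	"context"

	servicecidrinformer "knative.dev/pkg/client/injection/kube/informers/networking/v1alpha1/servicecidr"
)

// NewController sketches the usual consumption pattern: injection has already
// stored the informer in ctx at process start, so Get simply pulls it back out
// (and panics if this binary never registered it).
func NewController(ctx context.Context) {
	inf := servicecidrinformer.Get(ctx)

	_ = inf.Lister()   // listers back reads in the reconciler
	_ = inf.Informer() // the shared informer is handed to event handlers / cache sync
}
```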

go.mod (28 changed lines)
View File

@ -38,14 +38,14 @@ require (
golang.org/x/tools v0.38.0
gomodules.xyz/jsonpatch/v2 v2.5.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.33.5
k8s.io/apiextensions-apiserver v0.33.5
k8s.io/apimachinery v0.33.5
k8s.io/client-go v0.33.5
k8s.io/code-generator v0.33.5
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7
k8s.io/api v0.34.1
k8s.io/apiextensions-apiserver v0.34.1
k8s.io/apimachinery v0.34.1
k8s.io/client-go v0.34.1
k8s.io/code-generator v0.34.1
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f
k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20241210054802-24370beab758
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
knative.dev/hack v0.0.0-20251021013703-4fae78067103
sigs.k8s.io/randfill v1.0.0
sigs.k8s.io/yaml v1.6.0
@ -55,26 +55,27 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/influxdata/tdigest v0.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
@ -85,6 +86,7 @@ require (
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
@ -99,7 +101,7 @@ require (
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)

go.sum (54 changed lines)
View File

@ -16,16 +16,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q=
github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -47,10 +47,9 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -87,8 +86,9 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
@ -259,35 +259,33 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
k8s.io/apiextensions-apiserver v0.33.5 h1:93NZh6rmrcamX/tfv/dZrTsMiQX69ufANmDcKPEgSeA=
k8s.io/apiextensions-apiserver v0.33.5/go.mod h1:JIbyQnNlu6nQa7b1vgFi51pmlXOk8mdn0WJwUJnz/7U=
k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
k8s.io/code-generator v0.33.5 h1:KwkOvhwAaorjSwF2MQhhdhL3i8bBmAal/TWhX67kdHw=
k8s.io/code-generator v0.33.5/go.mod h1:Ra+sdZquRakeTGcEnQAPw6BmlZ92IvxwQQTX/XOvOIE=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/code-generator v0.34.1 h1:WpphT26E+j7tEgIUfFr5WfbJrktCGzB3JoJH9149xYc=
k8s.io/code-generator v0.34.1/go.mod h1:DeWjekbDnJWRwpw3s0Jat87c+e0TgkxoR4ar608yqvg=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/hack v0.0.0-20251021013703-4fae78067103 h1:j96YY5CLCTytWZsGVzixVvNasRbrni2NuXI54R+IPvA=
knative.dev/hack v0.0.0-20251021013703-4fae78067103/go.mod h1:L5RzHgbvam0u8QFHfzCX6MKxu/a/gIGEdaRBqNiVbl0=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@ -39,6 +39,7 @@ K8S_TYPES=$(find ./vendor/k8s.io/api -type d -path '*/*/*/*/*/*' | cut -d'/' -f
grep -v "imagepolicy:" | \
grep -v "resource:" | \
grep -v "storagemigration:" \
grep -v "alpha" \
)
OUTPUT_PKG="knative.dev/pkg/client/injection/kube" \

View File

@ -1,5 +1,8 @@
# Change history of go-restful
## [v3.12.2] - 2025-02-21
- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
## [v3.12.1] - 2024-05-28
@ -18,7 +21,7 @@
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
## [v3.12.0] - 2023-08-19
## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
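The v3.12.2 entry above ("allow empty payloads in post,put,patch") changes routing behavior for bodiless requests. A minimal sketch, assuming the standard go-restful v3 API (the /things path and handler are illustrative): with this release a POST that declares no body should reach the handler instead of being rejected with 415 Unsupported Media Type.

```go
package main

import (
	"io"
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func createThing(req *restful.Request, resp *restful.Response) {
	body, _ := io.ReadAll(req.Request.Body)
	resp.WriteHeader(http.StatusCreated)
	resp.Write([]byte("got: " + string(body)))
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/things").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
	ws.Route(ws.POST("").To(createThing))
	restful.Add(ws)

	// curl -X POST http://localhost:8080/things  (no body, no Content-Type)
	// should now be routed to createThing rather than fail with 415.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```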

View File

@ -3,7 +3,7 @@ go-restful
package for building REST-style Web Services using Google Go
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)

View File

@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
return params
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
candidates := make([]*Route, 0, 8)
for i, each := range routes {
@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
}
if httpRequest.ContentLength > 0 {
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
for _, candidate := range previous {
available = append(available, candidate.Produces...)
}
// if POST,PUT,PATCH without body
method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
if (method == http.MethodPost ||
method == http.MethodPut ||
method == http.MethodPatch) && (length == "" || length == "0") {
return nil, NewError(
http.StatusUnsupportedMediaType,
fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
)
}
return nil, NewError(
http.StatusNotAcceptable,
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
)
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return candidates[0], nil

View File

@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
// If the route does not specify Consumes then return true (*/*).
// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {

View File

@ -1,30 +1,31 @@
# CBOR Codec in Go
<!-- [![](https://github.com/fxamacker/images/raw/master/cbor/v2.5.0/fxamacker_cbor_banner.png)](#cbor-library-in-go) -->
<h1>CBOR Codec <a href="https://pkg.go.dev/github.com/fxamacker/cbor/v2"><img src="https://raw.githubusercontent.com/fxamacker/images/refs/heads/master/cbor/go-logo-blue.svg" alt="Go logo" style="height: 1em;" align="right"></a></h1>
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.&nbsp; CBOR is an Internet&nbsp;Standard defined by [IETF&nbsp;STD&nbsp;94 (RFC&nbsp;8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX&nbsp;Foundry, Flow Foundation, Fraunhofer&#8209;AISEC, Kubernetes, Let's&nbsp;Encrypt (ISRG), Linux&nbsp;Foundation, Microsoft, Mozilla, Oasis&nbsp;Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX&nbsp;Foundry, Flow Foundation, Fraunhofer&#8209;AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's&nbsp;Encrypt, Linux&nbsp;Foundation, Microsoft, Oasis&nbsp;Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
See [Quick&nbsp;Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
See [Quick&nbsp;Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer.
## fxamacker/cbor
[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD&nbsp;94 (RFC&nbsp;8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC&nbsp;8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC&nbsp;8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
<details><summary>Highlights</summary><p/>
<details><summary> 🔎&nbsp; Highlights</summary><p/>
__🚀&nbsp; Speed__
@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili
__🗜&nbsp; Data Size__
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
__:jigsaw:&nbsp; Usability__
@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
> [!NOTE]
> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
>
> | Codec | Speed (ns/op) | Memory | Allocs |
> | :---- | ------------: | -----: | -----: |
> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
>
> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
>
> <details><summary> 🔎&nbsp; Benchmark details </summary><p/>
>
> Latest comparison for decoding CBOR data to Go `[]byte`:
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
> - go test -bench=. -benchmem -count=20
>
> #### Prior comparisons
>
> | Codec | Speed (ns/op) | Memory | Allocs |
> | :---- | ------------: | -----: | -----: |
> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
>
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
> - go1.19.6, linux/amd64, i5-13600K (DDR4)
> - go test -bench=. -benchmem -count=20
>
> </details>
```Go
// Example of encoding/gob having "fatal error: runtime: out of memory"
// while decoding 181 bytes.
package main
import (
"bytes"
"encoding/gob"
"encoding/hex"
"fmt"
)
In contrast, some codecs can crash or use excessive resources while decoding bad data.
// Example data is from https://github.com/golang/go/issues/24446
// (shortened to 181 bytes).
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
"303030303030303030303030303030303030303030303030303030303030" +
"30"
> [!WARNING]
> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
>
> <details><summary> 🔎&nbsp; gob fatal error (out of memory) 💥 decoding 181 bytes</summary><p/>
>
> ```Go
> // Example of encoding/gob having "fatal error: runtime: out of memory"
> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
> package main
> import (
> "bytes"
> "encoding/gob"
> "encoding/hex"
> "fmt"
> )
>
> // Example data is from https://github.com/golang/go/issues/24446
> // (shortened to 181 bytes).
> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
> "303030303030303030303030303030303030303030303030303030303030" +
> "30"
>
> type X struct {
> J *X
> K map[string]int
> }
>
> func main() {
> raw, _ := hex.DecodeString(data)
> decoder := gob.NewDecoder(bytes.NewReader(raw))
>
> var x X
> decoder.Decode(&x) // fatal error: runtime: out of memory
> fmt.Println("Decoding finished.")
> }
> ```
>
>
> </details>
type X struct {
J *X
K map[string]int
}
### Smaller Encodings with Struct Tag Options
func main() {
raw, _ := hex.DecodeString(data)
decoder := gob.NewDecoder(bytes.NewReader(raw))
Struct tags automatically reduce encoded size of structs and improve speed.
var x X
decoder.Decode(&x) // fatal error: runtime: out of memory
fmt.Println("Decoding finished.")
}
```
We can write less code by using struct tag options:
- `toarray`: encode without field names (decode back to original struct)
- `keyasint`: encode field names as integers (decode back to original struct)
- `omitempty`: omit empty field when encoding
- `omitzero`: omit zero-value field when encoding
<hr/>
As a special case, struct field tag "-" omits the field.
</details>
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
| Codec | Speed (ns/op) | Memory | Allocs |
| :---- | ------------: | -----: | -----: |
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
<details><summary>Benchmark details</summary><p/>
Latest comparison used:
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
- go test -bench=. -benchmem -count=20
#### Prior comparisons
| Codec | Speed (ns/op) | Memory | Allocs |
| :---- | ------------: | -----: | -----: |
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
- go1.19.6, linux/amd64, i5-13600K (DDR4)
- go test -bench=. -benchmem -count=20
<hr/>
</details>
### Smaller Encodings with Struct Tags
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
https://go.dev/play/p/YxwvfPdFQG2
```Go
// Example encoding nested struct (with omitempty tag)
// - encoding/json: 18 byte JSON
// - fxamacker/cbor: 1 byte CBOR
package main
import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/fxamacker/cbor/v2"
)
type GrandChild struct {
Quux int `json:",omitempty"`
}
type Child struct {
Baz int `json:",omitempty"`
Qux GrandChild `json:",omitempty"`
}
type Parent struct {
Foo Child `json:",omitempty"`
Bar int `json:",omitempty"`
}
func cb() {
results, _ := cbor.Marshal(Parent{})
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
text, _ := cbor.Diagnose(results) // Diagnostic Notation
fmt.Println("DN: " + text)
}
func js() {
results, _ := json.Marshal(Parent{})
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
text := string(results) // JSON
fmt.Println("JSON: " + text)
}
func main() {
cb()
fmt.Println("-------------")
js()
}
```
Output (DN is Diagnostic Notation):
```
hex(CBOR): a0
DN: {}
-------------
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
JSON: {"Foo":{"Qux":{}}}
```
<hr/>
</details>
Example using different struct tags together:
NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field.
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
> [!NOTE]
> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
> - `encoding/json`: 18 bytes of JSON
> - `fxamacker/cbor`: 1 byte of CBOR
>
> <details><summary> 🔎&nbsp; Encoding 3-level nested Go struct with omitempty</summary><p/>
>
> https://go.dev/play/p/YxwvfPdFQG2
>
> ```Go
> // Example encoding nested struct (with omitempty tag)
> // - encoding/json: 18 byte JSON
> // - fxamacker/cbor: 1 byte CBOR
>
> package main
>
> import (
> "encoding/hex"
> "encoding/json"
> "fmt"
>
> "github.com/fxamacker/cbor/v2"
> )
>
> type GrandChild struct {
> Quux int `json:",omitempty"`
> }
>
> type Child struct {
> Baz int `json:",omitempty"`
> Qux GrandChild `json:",omitempty"`
> }
>
> type Parent struct {
> Foo Child `json:",omitempty"`
> Bar int `json:",omitempty"`
> }
>
> func cb() {
> results, _ := cbor.Marshal(Parent{})
> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
>
> text, _ := cbor.Diagnose(results) // Diagnostic Notation
> fmt.Println("DN: " + text)
> }
>
> func js() {
> results, _ := json.Marshal(Parent{})
> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
>
> text := string(results) // JSON
> fmt.Println("JSON: " + text)
> }
>
> func main() {
> cb()
> fmt.Println("-------------")
> js()
> }
> ```
>
> Output (DN is Diagnostic Notation):
> ```
> hex(CBOR): a0
> DN: {}
> -------------
> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
> JSON: {"Foo":{"Qux":{}}}
> ```
>
> </details>
## Quick Start
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
> [!TIP]
>
> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
>
> <details><summary> 🔎&nbsp; More about tinygo feature branch</summary>
>
> ### Tinygo
>
> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
>
> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
>
> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
>
> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
> - encoding error message can be different when encoding function type.
>
> Related tinygo issues:
> - https://github.com/tinygo-org/tinygo/issues/4277
> - https://github.com/tinygo-org/tinygo/issues/4458
>
> </details>
### Key Points
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
```
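As a concrete illustration of the UnmarshalFirst note above, a minimal sketch that walks a two-item CBOR Sequence (the byte values are hand-assembled for the example):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// A CBOR Sequence (RFC 8742): two independent data items back to back.
	// 0x01 is the unsigned integer 1; 0x63 0x66 0x6f 0x6f is the text string "foo".
	seq := []byte{0x01, 0x63, 0x66, 0x6f, 0x6f}

	var first int
	rest, err := cbor.UnmarshalFirst(seq, &first) // no ExtraneousDataError despite trailing bytes
	if err != nil {
		panic(err)
	}

	var second string
	if _, err := cbor.UnmarshalFirst(rest, &second); err != nil {
		panic(err)
	}
	fmt.Println(first, second) // 1 foo
}
```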
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
- Different CBOR libraries may use different default settings.
- CBOR-based formats or protocols usually require specific settings.
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
> [!IMPORTANT]
> CBOR settings allow trade-offs between speed, security, encoding size, etc.
>
> - Different CBOR libraries may use different default settings.
> - CBOR-based formats or protocols usually require specific settings.
>
> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
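A minimal sketch of picking up the CTAP2 preset mentioned above (assuming the v2 API's CTAP2EncOptions and EncMode; the sample map is illustrative):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Presets return EncOptions, which can be tweaked before being frozen
	// into a concurrency-safe EncMode.
	em, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		panic(err)
	}

	data, err := em.Marshal(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", data) // CTAP2 canonical form: sorted keys, shortest-form integers
}
```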
### Presets
@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
### Struct Tags
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
As a special case, struct field tag "-" omits the field.
<details><summary> 🔎&nbsp; Example encoding with struct field tag "-"</summary><p/>
https://go.dev/play/p/aWEIFxd7InX
```Go
// https://github.com/fxamacker/cbor/issues/652
package main
import (
"encoding/json"
"fmt"
"github.com/fxamacker/cbor/v2"
)
// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
type Entity struct {
_ struct{} `cbor:",toarray"`
ID uint64 `json:"id"`
Type string `cbor:"-" json:"typeOf"`
Name string `json:"name"`
}
func main() {
entity := Entity{
ID: 1,
Type: "int64",
Name: "Identifier",
}
c, _ := cbor.Marshal(entity)
diag, _ := cbor.Diagnose(c)
fmt.Printf("CBOR in hex: %x\n", c)
fmt.Printf("CBOR in edn: %s\n", diag)
j, _ := json.Marshal(entity)
fmt.Printf("JSON: %s\n", string(j))
fmt.Printf("JSON encoding is %d bytes\n", len(j))
fmt.Printf("CBOR encoding is %d bytes\n", len(c))
// Output:
// CBOR in hex: 82016a4964656e746966696572
// CBOR in edn: [1, "Identifier"]
// JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
// JSON encoding is 45 bytes
// CBOR encoding is 13 bytes
}
```
</details>
<details><summary> 🔎&nbsp; Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
https://go.dev/play/p/YxwvfPdFQG2
@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}
</details>
<details><summary>Example using several struct tags</summary><p/>
<details><summary> 🔎&nbsp; Example using struct tag options</summary><p/>
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
</details>
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
### CBOR Tags
@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
<details><summary>Example using TagSet and TagOptions</summary><p/>
<details><summary> 🔎&nbsp; Example using TagSet and TagOptions</summary><p/>
```go
// Use signedCWT struct defined in "Decoding CWT" example.
@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil {
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
// Marshal signedCWT with tag number.
if data, err := cbor.Marshal(v); err != nil {
if data, err := em.Marshal(v); err != nil {
return err
}
```
</details>
👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
<details><summary> 🔎&nbsp; Example using Embedded JSON Tag for CBOR (tag 262)</summary>
```go
// https://github.com/fxamacker/cbor/issues/657
package cbor_test
// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
// CBOR tag number 262 as "Embedded JSON Object" specified by the
// document Embedded JSON Tag for CBOR:
//
// "Tag 262 can be applied to a byte string (major type 2) to indicate
// that the byte string is a JSON Object. The length of the byte string
// indicates the content."
//
// For more info, see Embedded JSON Tag for CBOR at:
// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
import (
"bytes"
"encoding/json"
"fmt"
"github.com/fxamacker/cbor/v2"
)
// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
const cborTagNumForEmbeddedJSON = 262
// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
// with tag number 262 and the tag content is a JSON object "embedded" as a
// CBOR byte string (major type 2).
type EmbeddedJSON struct {
any
}
func NewEmbeddedJSON(val any) EmbeddedJSON {
return EmbeddedJSON{val}
}
// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the
// tag number 262 and the tag content is a JSON object that is
// "embedded" as a CBOR byte string.
func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
// Encode v to JSON object.
data, err := json.Marshal(v)
if err != nil {
return nil, err
}
// Create cbor.Tag representing a tagged CBOR data item.
tag := cbor.Tag{
Number: cborTagNumForEmbeddedJSON,
Content: data,
}
// Marshal to a tagged CBOR data item.
return cbor.Marshal(tag)
}
// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
// The byte slice provided to this function must contain a single
// tagged CBOR data item with the tag number 262 and tag content
// must be a JSON object "embedded" as a CBOR byte string.
func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
// Unmarshal tagged CBOR data item.
var tag cbor.Tag
if err := cbor.Unmarshal(b, &tag); err != nil {
return err
}
// Check tag number.
if tag.Number != cborTagNumForEmbeddedJSON {
return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
}
// Check tag content.
jsonData, isByteString := tag.Content.([]byte)
if !isByteString {
return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
}
// Unmarshal JSON object.
return json.Unmarshal(jsonData, v)
}
// MarshalJSON encodes EmbeddedJSON to a JSON object.
func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
return json.Marshal(v.any)
}
// UnmarshalJSON decodes a JSON object.
func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
dec := json.NewDecoder(bytes.NewReader(b))
dec.UseNumber()
return dec.Decode(&v.any)
}
func Example_embeddedJSONTagForCBOR() {
value := NewEmbeddedJSON(map[string]any{
"name": "gopher",
"id": json.Number("42"),
})
data, err := cbor.Marshal(value)
if err != nil {
panic(err)
}
fmt.Printf("cbor: %x\n", data)
var v EmbeddedJSON
err = cbor.Unmarshal(data, &v)
if err != nil {
panic(err)
}
fmt.Printf("%+v\n", v.any)
for k, v := range v.any.(map[string]any) {
fmt.Printf(" %s: %v (%T)\n", k, v, v)
}
}
```
</details>
### Functions and Interfaces
<details><summary>Functions and interfaces at a glance</summary><p/>
<details><summary> 🔎&nbsp; Functions and interfaces at a glance</summary><p/>
Common functions with same API as `encoding/json`:
- `Marshal`, `Unmarshal`
@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed.
Other useful functions:
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
- `Wellformed` returns true if the the CBOR data item is well-formed.
- `Wellformed` returns true if the CBOR data item is well-formed.
Interfaces identical or comparable to Go `encoding` packages include:
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e.
## Status
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs.
- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
v2.9.0 passed fuzz tests and is production quality.
The minimum version of Go required to build:
- v2.8.0 and newer releases require go 1.20+.
- v2.7.1 and older releases require go 1.17+.
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
### Prior Release
### Prior Releases
[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
<!--
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
<details><summary> 🔎&nbsp; Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
@ -549,9 +792,9 @@ geomean 2.782
## Who uses fxamacker/cbor
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper&nbsp;Labs, EdgeX&nbsp;Foundry, F5, FIDO Alliance, Fraunhofer&#8209;AISEC, Kubernetes, Let's Encrypt (ISRG), Linux&nbsp;Foundation, Matrix.org, Microsoft, Mozilla, National&nbsp;Cybersecurity&nbsp;Agency&nbsp;of&nbsp;France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Confidential&nbsp;Computing&nbsp;Consortium, ConsenSys, EdgeX&nbsp;Foundry, F5, Flow&nbsp;Foundation, Fraunhofer&#8209;AISEC, IBM, Kubernetes, Let's&nbsp;Encrypt&nbsp;(ISRG), Linaro, Linux&nbsp;Foundation, Matrix.org, Microsoft, National&nbsp;Cybersecurity&nbsp;Agency&nbsp;of&nbsp;France&nbsp;(govt), Netherlands&nbsp;(govt), Oasis&nbsp;Protocol, Red Hat OpenShift, Smallstep, Tailscale, Taurus SA, TIBCO, Veraison, and others.
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
`fxamacker/cbor` passed multiple confidential security assessments in 2022. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) assessed a subset of fxamacker/cbor v2.4.
## Standards
@ -588,7 +831,7 @@ By default, decoder treats time values of floating-point NaN and Infinity as if
__Click to expand topic:__
<details>
<summary>Duplicate Map Keys</summary><p>
<summary> 🔎&nbsp; Duplicate Map Keys</summary><p>
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
@ -601,7 +844,7 @@ APF suffix means "Allow Partial Fill" so the destination map or struct can conta
</details>
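A minimal sketch of turning this on (it uses the `DupMapKeyEnforcedAPF` mode referenced in the APF note above; the CBOR bytes are hand-assembled for illustration):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Enforce detection and rejection of duplicate map keys ("APF" = allow partial fill).
	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
	if err != nil {
		panic(err)
	}

	// Hand-assembled CBOR for {"a": 1, "a": 2}, which repeats the "a" key.
	data := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}

	var m map[string]int
	err = dm.Unmarshal(data, &m)
	fmt.Println(err) // a *cbor.DupMapKeyError for the repeated "a" key
	fmt.Println(m)   // destination may be partially filled (that is the "APF" behavior)
}
```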
<details>
<summary>Tag Validity</summary><p>
<summary> 🔎&nbsp; Tag Validity</summary><p>
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):

View File

@ -38,11 +38,38 @@ func (bs ByteString) MarshalCBOR() ([]byte, error) {
// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
if bs == nil {
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
}
d := decoder{data: data, dm: defaultDecMode}
// Check well-formedness of CBOR data item.
// ByteString.UnmarshalCBOR() is exported, so
// the codec needs to support same behavior for:
// - Unmarshal(data, *ByteString)
// - ByteString.UnmarshalCBOR(data)
err := d.wellformed(false, false)
if err != nil {
return err
}
return bs.unmarshalCBOR(data)
}
// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
// This function assumes data is well-formed, and does not perform bounds checking.
// This function is called by Unmarshal().
func (bs *ByteString) unmarshalCBOR(data []byte) error {
if bs == nil {
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
}
// Decoding CBOR null and CBOR undefined to ByteString resets data.
// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {

View File

@ -17,6 +17,7 @@ import (
type encodeFuncs struct {
ef encodeFunc
ief isEmptyFunc
izf isZeroFunc
}
var (
@ -31,10 +32,12 @@ type specialType int
const (
specialTypeNone specialType = iota
specialTypeUnmarshalerIface
specialTypeUnexportedUnmarshalerIface
specialTypeEmptyIface
specialTypeIface
specialTypeTag
specialTypeTime
specialTypeJSONUnmarshalerIface
)
type typeInfo struct {
@ -50,7 +53,7 @@ type typeInfo struct {
func newTypeInfo(t reflect.Type) *typeInfo {
tInfo := typeInfo{typ: t, kind: t.Kind()}
for t.Kind() == reflect.Ptr {
for t.Kind() == reflect.Pointer {
t = t.Elem()
}
@ -69,8 +72,12 @@ func newTypeInfo(t reflect.Type) *typeInfo {
tInfo.spclType = specialTypeTag
} else if t == typeTime {
tInfo.spclType = specialTypeTime
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
} else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
tInfo.spclType = specialTypeUnexportedUnmarshalerIface
} else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
tInfo.spclType = specialTypeUnmarshalerIface
} else if reflect.PointerTo(t).Implements(typeJSONUnmarshaler) {
tInfo.spclType = specialTypeJSONUnmarshalerIface
}
switch k {
@ -237,7 +244,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
e := getEncodeBuffer()
for i := 0; i < len(flds); i++ {
// Get field's encodeFunc
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
if flds[i].ef == nil {
err = &UnsupportedTypeError{t}
break
@ -321,7 +328,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
for i := 0; i < len(flds); i++ {
// Get field's encodeFunc
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
if flds[i].ef == nil {
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
encodingStructTypeCache.Store(t, structType)
@ -337,14 +344,14 @@ func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructT
return structType, structType.err
}
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
if v, _ := encodeFuncCache.Load(t); v != nil {
fs := v.(encodeFuncs)
return fs.ef, fs.ief
return fs.ef, fs.ief, fs.izf
}
ef, ief := getEncodeFuncInternal(t)
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
return ef, ief
ef, ief, izf := getEncodeFuncInternal(t)
encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
return ef, ief, izf
}
func getTypeInfo(t reflect.Type) *typeInfo {

View File

@ -5,6 +5,7 @@ package cbor
import (
"fmt"
"io"
"strconv"
)
@ -180,3 +181,11 @@ func validBuiltinTag(tagNum uint64, contentHead byte) error {
return nil
}
// Transcoder is a scheme for transcoding a single CBOR encoded data item to or from a different
// data format.
type Transcoder interface {
// Transcode reads the data item in its source format from a Reader and writes a
// corresponding representation in its destination format to a Writer.
Transcode(dst io.Writer, src io.Reader) error
}
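// Illustrative sketch (not part of this package): any type with a matching
// Transcode method satisfies Transcoder. For example, a pass-through transcoder
// that copies the data item unchanged could be written as:
//
//	type copyTranscoder struct{}
//
//	func (copyTranscoder) Transcode(dst io.Writer, src io.Reader) error {
//		_, err := io.Copy(dst, src)
//		return err
//	}
//
// Real transcoders supplied via JSONMarshalerTranscoder / JSONUnmarshalerTranscoder
// are expected to convert between JSON and CBOR rather than copy bytes verbatim.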

View File

@ -4,6 +4,7 @@
package cbor
import (
"bytes"
"encoding"
"encoding/base64"
"encoding/binary"
@ -94,7 +95,7 @@ import (
//
// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
// used to mean "not present", unmarshalling CBOR null and undefined value
// used to mean "not present", unmarshaling CBOR null and undefined value
// into any other Go type has no effect and returns no error.
//
// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
@ -104,7 +105,7 @@ import (
// if there are any remaining bytes following the first valid CBOR data item.
// See UnmarshalFirst, if you want to unmarshal only the first
// CBOR data item without ExtraneousDataError caused by remaining bytes.
func Unmarshal(data []byte, v interface{}) error {
func Unmarshal(data []byte, v any) error {
return defaultDecMode.Unmarshal(data, v)
}
@ -114,7 +115,7 @@ func Unmarshal(data []byte, v interface{}) error {
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
//
// See the documentation for Unmarshal for details.
func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
return defaultDecMode.UnmarshalFirst(data, v)
}
@ -151,6 +152,10 @@ type Unmarshaler interface {
UnmarshalCBOR([]byte) error
}
type unmarshaler interface {
unmarshalCBOR([]byte) error
}
// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
type InvalidUnmarshalError struct {
s string
@ -193,12 +198,12 @@ func (e *InvalidMapKeyTypeError) Error() string {
// DupMapKeyError describes detected duplicate map key in CBOR map.
type DupMapKeyError struct {
Key interface{}
Key any
Index int
}
func (e *DupMapKeyError) Error() string {
return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index)
return fmt.Sprintf("cbor: found duplicate map key %#v at map element index %d", e.Key, e.Index)
}
// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
@ -383,7 +388,7 @@ const (
// - return UnmarshalTypeError if value doesn't fit into int64
IntDecConvertSignedOrFail
// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
// It makes CBOR integers (major type 0 and 1) decode to:
// - int64 if value fits
// - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
@ -489,11 +494,11 @@ type BigIntDecMode int
const (
// BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
// when unmarshalling into a Go interface{}.
// when unmarshaling into a Go interface{}.
BigIntDecodeValue BigIntDecMode = iota
// BigIntDecodePointer makes CBOR bignum decode to *big.Int when
// unmarshalling into a Go interface{}.
// unmarshaling into a Go interface{}.
BigIntDecodePointer
maxBigIntDecMode
@ -745,6 +750,25 @@ func (bum BinaryUnmarshalerMode) valid() bool {
return bum >= 0 && bum < maxBinaryUnmarshalerMode
}
// TextUnmarshalerMode specifies how to decode into types that implement
// encoding.TextUnmarshaler.
type TextUnmarshalerMode int
const (
// TextUnmarshalerNone does not recognize TextUnmarshaler implementations during decode.
TextUnmarshalerNone TextUnmarshalerMode = iota
// TextUnmarshalerTextString will invoke UnmarshalText on the contents of a CBOR text
// string when decoding into a value that implements TextUnmarshaler.
TextUnmarshalerTextString
maxTextUnmarshalerMode
)
func (tum TextUnmarshalerMode) valid() bool {
return tum >= 0 && tum < maxTextUnmarshalerMode
}
// DecOptions specifies decoding options.
type DecOptions struct {
// DupMapKey specifies whether to enforce duplicate map key.
@ -793,7 +817,7 @@ type DecOptions struct {
// TagsMd specifies whether to allow CBOR tags (major type 6).
TagsMd TagsMode
// IntDec specifies which Go integer type (int64 or uint64) to use
// IntDec specifies which Go integer type (int64, uint64, or [big.Int]) to use
// when decoding CBOR int (major type 0 and 1) to Go interface{}.
IntDec IntDecMode
@ -807,7 +831,7 @@ type DecOptions struct {
ExtraReturnErrors ExtraDecErrorCond
// DefaultMapType specifies Go map type to create and decode to
// when unmarshalling CBOR into an empty interface value.
// when unmarshaling CBOR into an empty interface value.
// By default, unmarshal uses map[interface{}]interface{}.
DefaultMapType reflect.Type
@ -879,6 +903,15 @@ type DecOptions struct {
// BinaryUnmarshaler specifies how to decode into types that implement
// encoding.BinaryUnmarshaler.
BinaryUnmarshaler BinaryUnmarshalerMode
// TextUnmarshaler specifies how to decode into types that implement
// encoding.TextUnmarshaler.
TextUnmarshaler TextUnmarshalerMode
// JSONUnmarshalerTranscoder sets the transcoding scheme used to unmarshal types that
// implement json.Unmarshaler but do not also implement cbor.Unmarshaler. If nil, decoding
// behavior is not influenced by whether or not a type implements json.Unmarshaler.
JSONUnmarshalerTranscoder Transcoder
}
// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
@ -1091,33 +1124,39 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore
return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
}
if !opts.TextUnmarshaler.valid() {
return nil, errors.New("cbor: invalid TextUnmarshaler " + strconv.Itoa(int(opts.TextUnmarshaler)))
}
dm := decMode{
dupMapKey: opts.DupMapKey,
timeTag: opts.TimeTag,
maxNestedLevels: opts.MaxNestedLevels,
maxArrayElements: opts.MaxArrayElements,
maxMapPairs: opts.MaxMapPairs,
indefLength: opts.IndefLength,
tagsMd: opts.TagsMd,
intDec: opts.IntDec,
mapKeyByteString: opts.MapKeyByteString,
extraReturnErrors: opts.ExtraReturnErrors,
defaultMapType: opts.DefaultMapType,
utf8: opts.UTF8,
fieldNameMatching: opts.FieldNameMatching,
bigIntDec: opts.BigIntDec,
defaultByteStringType: opts.DefaultByteStringType,
byteStringToString: opts.ByteStringToString,
fieldNameByteString: opts.FieldNameByteString,
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
timeTagToAny: opts.TimeTagToAny,
simpleValues: simpleValues,
nanDec: opts.NaN,
infDec: opts.Inf,
byteStringToTime: opts.ByteStringToTime,
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
bignumTag: opts.BignumTag,
binaryUnmarshaler: opts.BinaryUnmarshaler,
dupMapKey: opts.DupMapKey,
timeTag: opts.TimeTag,
maxNestedLevels: opts.MaxNestedLevels,
maxArrayElements: opts.MaxArrayElements,
maxMapPairs: opts.MaxMapPairs,
indefLength: opts.IndefLength,
tagsMd: opts.TagsMd,
intDec: opts.IntDec,
mapKeyByteString: opts.MapKeyByteString,
extraReturnErrors: opts.ExtraReturnErrors,
defaultMapType: opts.DefaultMapType,
utf8: opts.UTF8,
fieldNameMatching: opts.FieldNameMatching,
bigIntDec: opts.BigIntDec,
defaultByteStringType: opts.DefaultByteStringType,
byteStringToString: opts.ByteStringToString,
fieldNameByteString: opts.FieldNameByteString,
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
timeTagToAny: opts.TimeTagToAny,
simpleValues: simpleValues,
nanDec: opts.NaN,
infDec: opts.Inf,
byteStringToTime: opts.ByteStringToTime,
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
bignumTag: opts.BignumTag,
binaryUnmarshaler: opts.BinaryUnmarshaler,
textUnmarshaler: opts.TextUnmarshaler,
jsonUnmarshalerTranscoder: opts.JSONUnmarshalerTranscoder,
}
return &dm, nil
@ -1130,7 +1169,7 @@ type DecMode interface {
// Unmarshal returns an error.
//
// See the documentation for Unmarshal for details.
Unmarshal(data []byte, v interface{}) error
Unmarshal(data []byte, v any) error
// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
// using the decoding mode. Any remaining bytes are returned in rest.
@ -1138,7 +1177,7 @@ type DecMode interface {
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
//
// See the documentation for Unmarshal for details.
UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error)
UnmarshalFirst(data []byte, v any) (rest []byte, err error)
// Valid checks whether data is a well-formed encoded CBOR data item and
// that it complies with configurable restrictions such as MaxNestedLevels,
@ -1170,33 +1209,35 @@ type DecMode interface {
}
type decMode struct {
tags tagProvider
dupMapKey DupMapKeyMode
timeTag DecTagMode
maxNestedLevels int
maxArrayElements int
maxMapPairs int
indefLength IndefLengthMode
tagsMd TagsMode
intDec IntDecMode
mapKeyByteString MapKeyByteStringMode
extraReturnErrors ExtraDecErrorCond
defaultMapType reflect.Type
utf8 UTF8Mode
fieldNameMatching FieldNameMatchingMode
bigIntDec BigIntDecMode
defaultByteStringType reflect.Type
byteStringToString ByteStringToStringMode
fieldNameByteString FieldNameByteStringMode
unrecognizedTagToAny UnrecognizedTagToAnyMode
timeTagToAny TimeTagToAnyMode
simpleValues *SimpleValueRegistry
nanDec NaNMode
infDec InfMode
byteStringToTime ByteStringToTimeMode
byteStringExpectedFormat ByteStringExpectedFormatMode
bignumTag BignumTagMode
binaryUnmarshaler BinaryUnmarshalerMode
tags tagProvider
dupMapKey DupMapKeyMode
timeTag DecTagMode
maxNestedLevels int
maxArrayElements int
maxMapPairs int
indefLength IndefLengthMode
tagsMd TagsMode
intDec IntDecMode
mapKeyByteString MapKeyByteStringMode
extraReturnErrors ExtraDecErrorCond
defaultMapType reflect.Type
utf8 UTF8Mode
fieldNameMatching FieldNameMatchingMode
bigIntDec BigIntDecMode
defaultByteStringType reflect.Type
byteStringToString ByteStringToStringMode
fieldNameByteString FieldNameByteStringMode
unrecognizedTagToAny UnrecognizedTagToAnyMode
timeTagToAny TimeTagToAnyMode
simpleValues *SimpleValueRegistry
nanDec NaNMode
infDec InfMode
byteStringToTime ByteStringToTimeMode
byteStringExpectedFormat ByteStringExpectedFormatMode
bignumTag BignumTagMode
binaryUnmarshaler BinaryUnmarshalerMode
textUnmarshaler TextUnmarshalerMode
jsonUnmarshalerTranscoder Transcoder
}
var defaultDecMode, _ = DecOptions{}.decMode()
@ -1211,32 +1252,34 @@ func (dm *decMode) DecOptions() DecOptions {
}
return DecOptions{
DupMapKey: dm.dupMapKey,
TimeTag: dm.timeTag,
MaxNestedLevels: dm.maxNestedLevels,
MaxArrayElements: dm.maxArrayElements,
MaxMapPairs: dm.maxMapPairs,
IndefLength: dm.indefLength,
TagsMd: dm.tagsMd,
IntDec: dm.intDec,
MapKeyByteString: dm.mapKeyByteString,
ExtraReturnErrors: dm.extraReturnErrors,
DefaultMapType: dm.defaultMapType,
UTF8: dm.utf8,
FieldNameMatching: dm.fieldNameMatching,
BigIntDec: dm.bigIntDec,
DefaultByteStringType: dm.defaultByteStringType,
ByteStringToString: dm.byteStringToString,
FieldNameByteString: dm.fieldNameByteString,
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
TimeTagToAny: dm.timeTagToAny,
SimpleValues: simpleValues,
NaN: dm.nanDec,
Inf: dm.infDec,
ByteStringToTime: dm.byteStringToTime,
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
BignumTag: dm.bignumTag,
BinaryUnmarshaler: dm.binaryUnmarshaler,
DupMapKey: dm.dupMapKey,
TimeTag: dm.timeTag,
MaxNestedLevels: dm.maxNestedLevels,
MaxArrayElements: dm.maxArrayElements,
MaxMapPairs: dm.maxMapPairs,
IndefLength: dm.indefLength,
TagsMd: dm.tagsMd,
IntDec: dm.intDec,
MapKeyByteString: dm.mapKeyByteString,
ExtraReturnErrors: dm.extraReturnErrors,
DefaultMapType: dm.defaultMapType,
UTF8: dm.utf8,
FieldNameMatching: dm.fieldNameMatching,
BigIntDec: dm.bigIntDec,
DefaultByteStringType: dm.defaultByteStringType,
ByteStringToString: dm.byteStringToString,
FieldNameByteString: dm.fieldNameByteString,
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
TimeTagToAny: dm.timeTagToAny,
SimpleValues: simpleValues,
NaN: dm.nanDec,
Inf: dm.infDec,
ByteStringToTime: dm.byteStringToTime,
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
BignumTag: dm.bignumTag,
BinaryUnmarshaler: dm.binaryUnmarshaler,
TextUnmarshaler: dm.textUnmarshaler,
JSONUnmarshalerTranscoder: dm.jsonUnmarshalerTranscoder,
}
}
@ -1245,7 +1288,7 @@ func (dm *decMode) DecOptions() DecOptions {
// Unmarshal returns an error.
//
// See the documentation for Unmarshal for details.
func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
func (dm *decMode) Unmarshal(data []byte, v any) error {
d := decoder{data: data, dm: dm}
// Check well-formedness.
@ -1265,7 +1308,7 @@ func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
//
// See the documentation for Unmarshal for details.
func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
d := decoder{data: data, dm: dm}
// check well-formedness.
@ -1341,13 +1384,13 @@ type decoder struct {
// If CBOR data item fails to be decoded into v,
// error is returned and offset is moved to the next CBOR data item.
// Precondition: d.data contains at least one well-formed CBOR data item.
func (d *decoder) value(v interface{}) error {
func (d *decoder) value(v any) error {
// v can't be nil, non-pointer, or nil pointer value.
if v == nil {
return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
}
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
if rv.Kind() != reflect.Pointer {
return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
} else if rv.IsNil() {
return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
@ -1361,9 +1404,9 @@ func (d *decoder) value(v interface{}) error {
func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
if d.nextCBORNil() && v.Kind() == reflect.Ptr {
if d.nextCBORNil() && v.Kind() == reflect.Pointer {
d.skip()
v.Set(reflect.Zero(v.Type()))
v.SetZero()
return nil
}
@ -1387,7 +1430,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
if registeredType != nil {
if registeredType.Implements(tInfo.nonPtrType) ||
reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) {
reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
v.Set(reflect.New(registeredType))
v = v.Elem()
tInfo = getTypeInfo(registeredType)
@ -1399,7 +1442,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
// Create new value for the pointer v to point to.
// At this point, CBOR value is not nil/undefined if v is a pointer.
for v.Kind() == reflect.Ptr {
for v.Kind() == reflect.Pointer {
if v.IsNil() {
if !v.CanSet() {
d.skip()
@ -1460,6 +1503,17 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
case specialTypeUnmarshalerIface:
return d.parseToUnmarshaler(v)
case specialTypeUnexportedUnmarshalerIface:
return d.parseToUnexportedUnmarshaler(v)
case specialTypeJSONUnmarshalerIface:
// This special type implies that the type does not also implement
// cbor.Unmarshaler.
if d.dm.jsonUnmarshalerTranscoder == nil {
break
}
return d.parseToJSONUnmarshaler(v)
}
}
@ -1516,14 +1570,14 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
return err
}
copied = copied || converted
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler)
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
case cborTypeTextString:
b, err := d.parseTextString()
if err != nil {
return err
}
return fillTextString(t, b, v)
return fillTextString(t, b, v, d.dm.textUnmarshaler)
case cborTypePrimitives:
_, ai, val := d.getHead()
@ -1575,7 +1629,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
return nil
}
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
}
if bi.IsUint64() {
return fillPositiveInt(t, bi.Uint64(), v)
@ -1598,7 +1652,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
return nil
}
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
}
if bi.IsInt64() {
return fillNegativeInt(t, bi.Int64(), v)
@ -1788,12 +1842,12 @@ func (d *decoder) parseToTime() (time.Time, bool, error) {
// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
// It assumes data is well-formed, and does not perform bounds checking.
func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() {
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
d.skip()
return nil
}
if v.Kind() != reflect.Ptr && v.CanAddr() {
if v.Kind() != reflect.Pointer && v.CanAddr() {
v = v.Addr()
}
if u, ok := v.Interface().(Unmarshaler); ok {
@ -1805,9 +1859,55 @@ func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
}
// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
// It assumes data is well-formed, and does not perform bounds checking.
func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
d.skip()
return nil
}
if v.Kind() != reflect.Pointer && v.CanAddr() {
v = v.Addr()
}
if u, ok := v.Interface().(unmarshaler); ok {
start := d.off
d.skip()
return u.unmarshalCBOR(d.data[start:d.off])
}
d.skip()
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
}
// parseToJSONUnmarshaler parses CBOR data to be transcoded to JSON and passed to the value's
// implementation of the json.Unmarshaler interface. It assumes data is well-formed, and does not
// perform bounds checking.
func (d *decoder) parseToJSONUnmarshaler(v reflect.Value) error {
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
d.skip()
return nil
}
if v.Kind() != reflect.Pointer && v.CanAddr() {
v = v.Addr()
}
if u, ok := v.Interface().(jsonUnmarshaler); ok {
start := d.off
d.skip()
e := getEncodeBuffer()
defer putEncodeBuffer(e)
if err := d.dm.jsonUnmarshalerTranscoder.Transcode(e, bytes.NewReader(d.data[start:d.off])); err != nil {
return &TranscodeError{err: err, rtype: v.Type(), sourceFormat: "cbor", targetFormat: "json"}
}
return u.UnmarshalJSON(e.Bytes())
}
d.skip()
return errors.New("cbor: failed to assert " + v.Type().String() + " as json.Unmarshaler")
}
// parse parses CBOR data and returns value in default Go type.
// It assumes data is well-formed, and does not perform bounds checking.
func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo
func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
// Strip self-described CBOR tag number.
if skipSelfDescribedTag {
for d.nextCBORType() == cborTypeTag {
@ -2224,15 +2324,15 @@ func (d *decoder) parseTextString() ([]byte, error) {
return b, nil
}
func (d *decoder) parseArray() ([]interface{}, error) {
func (d *decoder) parseArray() ([]any, error) {
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
hasSize := !indefiniteLength
count := int(val)
if !hasSize {
count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
}
v := make([]interface{}, count)
var e interface{}
v := make([]any, count)
var e any
var err, lastErr error
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
if e, lastErr = d.parse(true); lastErr != nil {
@ -2290,20 +2390,19 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
}
// Set remaining Go array elements to zero values.
if gi < vLen {
zeroV := reflect.Zero(tInfo.elemTypeInfo.typ)
for ; gi < vLen; gi++ {
v.Index(gi).Set(zeroV)
v.Index(gi).SetZero()
}
}
return err
}
func (d *decoder) parseMap() (interface{}, error) {
func (d *decoder) parseMap() (any, error) {
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
hasSize := !indefiniteLength
count := int(val)
m := make(map[interface{}]interface{})
var k, e interface{}
m := make(map[any]any)
var k, e any
var err, lastErr error
keyCount := 0
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
@ -2376,13 +2475,13 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
}
keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value
var keyValue, eleValue reflect.Value
keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
var err, lastErr error
keyCount := v.Len()
var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key.
var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
existingKeys = make(map[interface{}]bool, keyCount)
existingKeys = make(map[any]bool, keyCount)
if keyCount > 0 {
vKeys := v.MapKeys()
for i := 0; i < len(vKeys); i++ {
@ -2395,10 +2494,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
if !keyValue.IsValid() {
keyValue = reflect.New(keyType).Elem()
} else if !reuseKey {
if !zeroKeyValue.IsValid() {
zeroKeyValue = reflect.Zero(keyType)
}
keyValue.Set(zeroKeyValue)
keyValue.SetZero()
}
if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
if err == nil {
@ -2413,7 +2509,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
if !isHashableValue(keyValue.Elem()) {
var converted bool
if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
var k interface{}
var k any
k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
if converted {
keyValue.Set(reflect.ValueOf(k))
@ -2433,10 +2529,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
if !eleValue.IsValid() {
eleValue = reflect.New(eleType).Elem()
} else if !reuseEle {
if !zeroEleValue.IsValid() {
zeroEleValue = reflect.Zero(eleType)
}
eleValue.Set(zeroEleValue)
eleValue.SetZero()
}
if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
if err == nil {
@ -2584,7 +2677,7 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n
// Keeps track of CBOR map keys to detect duplicate map key
keyCount := 0
var mapKeys map[interface{}]struct{}
var mapKeys map[any]struct{}
errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
@ -2594,7 +2687,7 @@ MapEntryLoop:
// If duplicate field detection is enabled and the key at index j did not match any
// field, k will hold the map key.
var k interface{}
var k any
t := d.nextCBORType()
if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
@ -2764,7 +2857,7 @@ MapEntryLoop:
// check is never reached.
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
if mapKeys == nil {
mapKeys = make(map[interface{}]struct{}, 1)
mapKeys = make(map[any]struct{}, 1)
}
mapKeys[k] = struct{}{}
newKeyCount := len(mapKeys)
@ -2968,20 +3061,25 @@ func (d *decoder) nextCBORNil() bool {
return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7
}
type jsonUnmarshaler interface{ UnmarshalJSON([]byte) error }
var (
typeIntf = reflect.TypeOf([]interface{}(nil)).Elem()
typeTime = reflect.TypeOf(time.Time{})
typeBigInt = reflect.TypeOf(big.Int{})
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
typeString = reflect.TypeOf("")
typeByteSlice = reflect.TypeOf([]byte(nil))
typeIntf = reflect.TypeOf([]any(nil)).Elem()
typeTime = reflect.TypeOf(time.Time{})
typeBigInt = reflect.TypeOf(big.Int{})
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
typeJSONUnmarshaler = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
typeString = reflect.TypeOf("")
typeByteSlice = reflect.TypeOf([]byte(nil))
)
func fillNil(_ cborType, v reflect.Value) error {
switch v.Kind() {
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr:
v.Set(reflect.Zero(v.Type()))
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Pointer:
v.SetZero()
return nil
}
return nil
@ -3082,8 +3180,8 @@ func fillFloat(t cborType, val float64, v reflect.Value) error {
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
}
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error {
if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) {
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode, tum TextUnmarshalerMode) error {
if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
if v.CanAddr() {
v = v.Addr()
if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
@ -3095,9 +3193,26 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
}
return errors.New("cbor: cannot set new value for " + v.Type().String())
}
if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String {
v.SetString(string(val))
return nil
if bsts != ByteStringToStringForbidden {
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
if v.CanAddr() {
v = v.Addr()
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
// The contract of TextUnmarshaler forbids retaining the input
// bytes, so no copying is required even if val is shared.
if err := u.UnmarshalText(val); err != nil {
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
}
return nil
}
}
return errors.New("cbor: cannot set new value for " + v.Type().String())
}
if v.Kind() == reflect.String {
v.SetString(string(val))
return nil
}
}
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
src := val
@ -3117,9 +3232,8 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
}
// Set remaining Go array elements to zero values.
if i < vLen {
zeroV := reflect.Zero(reflect.TypeOf(byte(0)))
for ; i < vLen; i++ {
v.Index(i).Set(zeroV)
v.Index(i).SetZero()
}
}
return nil
@ -3127,11 +3241,28 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
}
func fillTextString(t cborType, val []byte, v reflect.Value) error {
func fillTextString(t cborType, val []byte, v reflect.Value, tum TextUnmarshalerMode) error {
// Check if the value implements TextUnmarshaler and the mode allows it
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
if v.CanAddr() {
v = v.Addr()
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
// The contract of TextUnmarshaler forbids retaining the input
// bytes, so no copying is required even if val is shared.
if err := u.UnmarshalText(val); err != nil {
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
}
return nil
}
}
return errors.New("cbor: cannot set new value for " + v.Type().String())
}
if v.Kind() == reflect.String {
v.SetString(string(val))
return nil
}
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
}
@ -3172,7 +3303,7 @@ func isHashableValue(rv reflect.Value) bool {
// This function also handles nested tags.
// CBOR data is already verified to be well-formed before this function is used,
// so the recursion won't exceed max nested levels.
func convertByteSliceToByteString(v interface{}) (interface{}, bool) {
func convertByteSliceToByteString(v any) (any, bool) {
switch v := v.(type) {
case []byte:
return ByteString(v), true

View File

@ -2,15 +2,15 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
/*
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
Encoding options allow "preferred serialization" by encoding integers and floats
to their smallest forms (e.g. float16) when values fit.
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
and easier to use with structs.
Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
and reduce programming effort.
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
"keyasint" makes a field encode to an element of CBOR map with specified int key.
@ -23,11 +23,19 @@ The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
Function signatures identical to encoding/json include:
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
Standard interfaces include:
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
Diagnostic functions translate CBOR data item into Diagnostic Notation:
Diagnose, DiagnoseFirst
Functions that simplify using CBOR Sequences (RFC 8742) include:
UnmarshalFirst
Custom encoding and decoding is possible by implementing standard interfaces for
user-defined Go types.
@ -50,19 +58,19 @@ Modes are intended to be reused and are safe for concurrent use.
EncMode and DecMode Interfaces
// EncMode interface uses immutable options and is safe for concurrent use.
type EncMode interface {
// EncMode interface uses immutable options and is safe for concurrent use.
type EncMode interface {
Marshal(v interface{}) ([]byte, error)
NewEncoder(w io.Writer) *Encoder
EncOptions() EncOptions // returns copy of options
}
}
// DecMode interface uses immutable options and is safe for concurrent use.
type DecMode interface {
// DecMode interface uses immutable options and is safe for concurrent use.
type DecMode interface {
Unmarshal(data []byte, v interface{}) error
NewDecoder(r io.Reader) *Decoder
DecOptions() DecOptions // returns copy of options
}
}
Using Default Encoding Mode
@ -78,6 +86,16 @@ Using Default Decoding Mode
decoder := cbor.NewDecoder(r)
err = decoder.Decode(&v)
Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
// Decode the first CBOR data item and return remaining bytes:
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
Using Extended Diagnostic Notation (EDN) to represent CBOR data
// Translate the first CBOR data item into text and return remaining bytes.
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to text
Creating and Using Encoding Modes
// Create EncOptions using either struct literal or a function.
@ -111,15 +129,20 @@ Decoding Options: https://github.com/fxamacker/cbor#decoding-options
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
If both struct tags are specified then `cbor` is used.
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
The "omitzero" option omits zero values from encoding, matching
[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
When specified in the `cbor` tag, the option is always honored.
When specified in the `json` tag, the option is honored when building with Go 1.24+.
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
makes struct fields encode to elements of CBOR map with int keys.
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
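
For example, a minimal sketch (the type names, field names, and key numbers below are illustrative only):

	type token struct {
		Issuer  string `cbor:"1,keyasint,omitempty"` // CBOR map key 1, omitted when empty
		Expires int64  `cbor:"4,keyasint,omitzero"`  // CBOR map key 4, omitted when zero
	}

	type point struct {
		_ struct{} `cbor:",toarray"` // encode fields as CBOR array elements instead of a map
		X int64
		Y int64
	}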
# Tests and Fuzzing

View File

@ -58,8 +58,10 @@ import (
//
// Marshal supports format string stored under the "cbor" key in the struct
// field's tag. CBOR format string can specify the name of the field,
// "omitempty" and "keyasint" options, and special case "-" for field omission.
// If "cbor" key is absent, Marshal uses "json" key.
// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
// field omission. If "cbor" key is absent, Marshal uses "json" key.
// When using the "json" key, the "omitzero" option is honored when building
// with Go 1.24+ to match stdlib encoding/json behavior.
//
// Struct field name is treated as integer if it has "keyasint" option in
// its format string. The format string must specify an integer as its
@ -67,8 +69,8 @@ import (
//
// Special struct field "_" is used to specify struct level options, such as
// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
// "omitempty" is disabled by "toarray" to ensure that the same number
// of elements are encoded every time.
// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
// same number of elements are encoded every time.
//
// Anonymous struct fields are marshaled as if their exported fields
// were fields in the outer struct. Marshal follows the same struct fields
@ -92,7 +94,7 @@ import (
//
// Values of other types cannot be encoded in CBOR. Attempting
// to encode such a value causes Marshal to return an UnsupportedTypeError.
func Marshal(v interface{}) ([]byte, error) {
func Marshal(v any) ([]byte, error) {
return defaultEncMode.Marshal(v)
}
@ -103,7 +105,7 @@ func Marshal(v interface{}) ([]byte, error) {
// partially encoded data if error is returned.
//
// See Marshal for more details.
func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
func MarshalToBuffer(v any, buf *bytes.Buffer) error {
return defaultEncMode.MarshalToBuffer(v, buf)
}
@ -130,6 +132,20 @@ func (e *MarshalerError) Unwrap() error {
return e.err
}
type TranscodeError struct {
err error
rtype reflect.Type
sourceFormat, targetFormat string
}
func (e TranscodeError) Error() string {
return "cbor: cannot transcode from " + e.sourceFormat + " to " + e.targetFormat + ": " + e.err.Error()
}
func (e TranscodeError) Unwrap() error {
return e.err
}
// UnsupportedTypeError is returned by Marshal when attempting to encode value
// of an unsupported type.
type UnsupportedTypeError struct {
@ -291,24 +307,51 @@ func (icm InfConvertMode) valid() bool {
return icm >= 0 && icm < maxInfConvert
}
// TimeMode specifies how to encode time.Time values.
// TimeMode specifies how to encode time.Time values in compliance with RFC 8949 (CBOR):
// - Section 3.4.1: Standard Date/Time String
// - Section 3.4.2: Epoch-Based Date/Time
// For more info, see:
// - https://www.rfc-editor.org/rfc/rfc8949.html
// NOTE: User applications that prefer to encode time with fractional seconds to an integer
// (instead of floating point or text string) can use a CBOR tag number not assigned by IANA:
// 1. Define a user-defined type in Go with just a time.Time or int64 as its data.
// 2. Implement the cbor.Marshaler and cbor.Unmarshaler interface for that user-defined type
// to encode or decode the tagged data item with an enclosed integer content.
type TimeMode int
const (
// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
// TimeUnix causes time.Time to encode to a CBOR time (tag 1) with an integer content
// representing seconds elapsed (with 1-second precision) since UNIX Epoch UTC.
// The TimeUnix option is location independent and has a clear precision guarantee.
TimeUnix TimeMode = iota
// TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision.
// TimeUnixMicro causes time.Time to encode to a CBOR time (tag 1) with a floating point content
// representing seconds elapsed (with up to 1-microsecond precision) since UNIX Epoch UTC.
// NOTE: The floating point content is encoded to the shortest floating-point encoding that preserves
// the 64-bit floating point value. I.e., the floating point encoding can be IEEE 754:
// binary64, binary32, or binary16 depending on the content's value.
TimeUnixMicro
// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
// otherwise float-point rounded to microsecond precision.
// TimeUnixDynamic causes time.Time to encode to a CBOR time (tag 1) with either an integer content or
// a floating point content, depending on the content's value. This option is equivalent to dynamically
// choosing TimeUnix if time.Time doesn't have fractional seconds, and using TimeUnixMicro if time.Time
// has fractional seconds.
TimeUnixDynamic
// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
// TimeRFC3339 causes time.Time to encode to a CBOR time (tag 0) with a text string content
// representing the time using 1-second precision in RFC3339 format. If the time.Time has a
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
// - providing a time.Time value set to UTC, or
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339.
TimeRFC3339
// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
// TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content
// representing the time using 1-nanosecond precision in RFC3339 format. If the time.Time has a
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
// - providing a time.Time value set to UTC, or
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano.
TimeRFC3339Nano
maxTimeMode
@ -481,6 +524,24 @@ func (bmm BinaryMarshalerMode) valid() bool {
return bmm >= 0 && bmm < maxBinaryMarshalerMode
}
// TextMarshalerMode specifies how to encode types that implement encoding.TextMarshaler.
type TextMarshalerMode int
const (
// TextMarshalerNone does not recognize TextMarshaler implementations during encode.
// This is the default behavior.
TextMarshalerNone TextMarshalerMode = iota
// TextMarshalerTextString encodes the output of MarshalText to a CBOR text string.
TextMarshalerTextString
maxTextMarshalerMode
)
func (tmm TextMarshalerMode) valid() bool {
return tmm >= 0 && tmm < maxTextMarshalerMode
}
// EncOptions specifies encoding options.
type EncOptions struct {
// Sort specifies sorting order.
@ -538,6 +599,14 @@ type EncOptions struct {
// BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
BinaryMarshaler BinaryMarshalerMode
// TextMarshaler specifies how to encode types that implement encoding.TextMarshaler.
TextMarshaler TextMarshalerMode
// JSONMarshalerTranscoder sets the transcoding scheme used to marshal types that implement
// json.Marshaler but do not also implement cbor.Marshaler. If nil, encoding behavior is not
// influenced by whether or not a type implements json.Marshaler.
JSONMarshalerTranscoder Transcoder
}
// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
@ -748,6 +817,9 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
if !opts.BinaryMarshaler.valid() {
return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
}
if !opts.TextMarshaler.valid() {
return nil, errors.New("cbor: invalid TextMarshaler " + strconv.Itoa(int(opts.TextMarshaler)))
}
em := encMode{
sort: opts.Sort,
shortestFloat: opts.ShortestFloat,
@ -767,13 +839,15 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
byteArray: opts.ByteArray,
binaryMarshaler: opts.BinaryMarshaler,
textMarshaler: opts.TextMarshaler,
jsonMarshalerTranscoder: opts.JSONMarshalerTranscoder,
}
return &em, nil
}
// EncMode is the main interface for CBOR encoding.
type EncMode interface {
Marshal(v interface{}) ([]byte, error)
Marshal(v any) ([]byte, error)
NewEncoder(w io.Writer) *Encoder
EncOptions() EncOptions
}
@ -783,7 +857,7 @@ type EncMode interface {
// into the built-in buffer pool.
type UserBufferEncMode interface {
EncMode
MarshalToBuffer(v interface{}, buf *bytes.Buffer) error
MarshalToBuffer(v any, buf *bytes.Buffer) error
// This private method is to prevent users implementing
// this interface and so future additions to it will
@ -812,6 +886,8 @@ type encMode struct {
byteSliceLaterEncodingTag uint64
byteArray ByteArrayMode
binaryMarshaler BinaryMarshalerMode
textMarshaler TextMarshalerMode
jsonMarshalerTranscoder Transcoder
}
var defaultEncMode, _ = EncOptions{}.encMode()
@ -888,22 +964,24 @@ func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode
// EncOptions returns user specified options used to create this EncMode.
func (em *encMode) EncOptions() EncOptions {
return EncOptions{
Sort: em.sort,
ShortestFloat: em.shortestFloat,
NaNConvert: em.nanConvert,
InfConvert: em.infConvert,
BigIntConvert: em.bigIntConvert,
Time: em.time,
TimeTag: em.timeTag,
IndefLength: em.indefLength,
NilContainers: em.nilContainers,
TagsMd: em.tagsMd,
OmitEmpty: em.omitEmpty,
String: em.stringType,
FieldName: em.fieldName,
ByteSliceLaterFormat: em.byteSliceLaterFormat,
ByteArray: em.byteArray,
BinaryMarshaler: em.binaryMarshaler,
Sort: em.sort,
ShortestFloat: em.shortestFloat,
NaNConvert: em.nanConvert,
InfConvert: em.infConvert,
BigIntConvert: em.bigIntConvert,
Time: em.time,
TimeTag: em.timeTag,
IndefLength: em.indefLength,
NilContainers: em.nilContainers,
TagsMd: em.tagsMd,
OmitEmpty: em.omitEmpty,
String: em.stringType,
FieldName: em.fieldName,
ByteSliceLaterFormat: em.byteSliceLaterFormat,
ByteArray: em.byteArray,
BinaryMarshaler: em.binaryMarshaler,
TextMarshaler: em.textMarshaler,
JSONMarshalerTranscoder: em.jsonMarshalerTranscoder,
}
}
@ -921,7 +999,7 @@ func (em *encMode) encTagBytes(t reflect.Type) []byte {
// Marshal returns the CBOR encoding of v using em encoding mode.
//
// See the documentation for Marshal for details.
func (em *encMode) Marshal(v interface{}) ([]byte, error) {
func (em *encMode) Marshal(v any) ([]byte, error) {
e := getEncodeBuffer()
if err := encode(e, em, reflect.ValueOf(v)); err != nil {
@ -943,7 +1021,7 @@ func (em *encMode) Marshal(v interface{}) ([]byte, error) {
// partially encoded data if error is returned.
//
// See Marshal for more details.
func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
if buf == nil {
return fmt.Errorf("cbor: encoding buffer provided by user is nil")
}
@ -957,7 +1035,7 @@ func (em *encMode) NewEncoder(w io.Writer) *Encoder {
// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
var encodeBufferPool = sync.Pool{
New: func() interface{} {
New: func() any {
e := new(bytes.Buffer)
e.Grow(32) // TODO: make this configurable
return e
@ -975,6 +1053,7 @@ func putEncodeBuffer(e *bytes.Buffer) {
type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
type isZeroFunc func(v reflect.Value) (zero bool, err error)
func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
if !v.IsValid() {
@ -983,7 +1062,7 @@ func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
return nil
}
vt := v.Type()
f, _ := getEncodeFunc(vt)
f, _, _ := getEncodeFunc(vt)
if f == nil {
return &UnsupportedTypeError{vt}
}
@ -1483,6 +1562,15 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
continue
}
}
if f.omitZero {
zero, err := f.izf(fv)
if err != nil {
return err
}
if zero {
continue
}
}
if !f.keyAsInt && em.fieldName == FieldNameToByteString {
e.Write(f.cborNameByteString)
@ -1665,6 +1753,107 @@ func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, e
return len(data) == 0, nil
}
type textMarshalerEncoder struct {
alternateEncode encodeFunc
alternateIsEmpty isEmptyFunc
}
func (tme textMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
if em.textMarshaler == TextMarshalerNone {
return tme.alternateEncode(e, em, v)
}
vt := v.Type()
m, ok := v.Interface().(encoding.TextMarshaler)
if !ok {
pv := reflect.New(vt)
pv.Elem().Set(v)
m = pv.Interface().(encoding.TextMarshaler)
}
data, err := m.MarshalText()
if err != nil {
return fmt.Errorf("cbor: cannot marshal text for %s: %w", vt, err)
}
if b := em.encTagBytes(vt); b != nil {
e.Write(b)
}
encodeHead(e, byte(cborTypeTextString), uint64(len(data)))
e.Write(data)
return nil
}
func (tme textMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
if em.textMarshaler == TextMarshalerNone {
return tme.alternateIsEmpty(em, v)
}
m, ok := v.Interface().(encoding.TextMarshaler)
if !ok {
pv := reflect.New(v.Type())
pv.Elem().Set(v)
m = pv.Interface().(encoding.TextMarshaler)
}
data, err := m.MarshalText()
if err != nil {
return false, fmt.Errorf("cbor: cannot marshal text for %s: %w", v.Type(), err)
}
return len(data) == 0, nil
}
type jsonMarshalerEncoder struct {
alternateEncode encodeFunc
alternateIsEmpty isEmptyFunc
}
func (jme jsonMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
if em.jsonMarshalerTranscoder == nil {
return jme.alternateEncode(e, em, v)
}
vt := v.Type()
m, ok := v.Interface().(jsonMarshaler)
if !ok {
pv := reflect.New(vt)
pv.Elem().Set(v)
m = pv.Interface().(jsonMarshaler)
}
json, err := m.MarshalJSON()
if err != nil {
return err
}
offset := e.Len()
if b := em.encTagBytes(vt); b != nil {
e.Write(b)
}
if err := em.jsonMarshalerTranscoder.Transcode(e, bytes.NewReader(json)); err != nil {
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
}
// Validate that the transcode function has written exactly one well-formed data item.
d := decoder{data: e.Bytes()[offset:], dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
if err := d.wellformed(false, true); err != nil {
e.Truncate(offset)
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
}
return nil
}
func (jme jsonMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
if em.jsonMarshalerTranscoder == nil {
return jme.alternateIsEmpty(em, v)
}
// As with types implementing cbor.Marshaler, transcoded json.Marshaler values always encode
// as exactly one complete CBOR data item.
return false, nil
}
func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
@ -1768,41 +1957,45 @@ func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
return headSize
}
type jsonMarshaler interface{ MarshalJSON() ([]byte, error) }
var (
typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
typeTextMarshaler = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
typeJSONMarshaler = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
typeRawMessage = reflect.TypeOf(RawMessage(nil))
typeByteString = reflect.TypeOf(ByteString(""))
)
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
k := t.Kind()
if k == reflect.Ptr {
return getEncodeIndirectValueFunc(t), isEmptyPtr
if k == reflect.Pointer {
return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
}
switch t {
case typeSimpleValue:
return encodeMarshalerType, isEmptyUint
return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
case typeTag:
return encodeTag, alwaysNotEmpty
return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
case typeTime:
return encodeTime, alwaysNotEmpty
return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
case typeBigInt:
return encodeBigInt, alwaysNotEmpty
return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
case typeRawMessage:
return encodeMarshalerType, isEmptySlice
return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
case typeByteString:
return encodeMarshalerType, isEmptyString
return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
}
if reflect.PtrTo(t).Implements(typeMarshaler) {
return encodeMarshalerType, alwaysNotEmpty
if reflect.PointerTo(t).Implements(typeMarshaler) {
return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
}
if reflect.PtrTo(t).Implements(typeBinaryMarshaler) {
if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
defer func() {
// capture encoding method used for modes that disable BinaryMarshaler
bme := binaryMarshalerEncoder{
@ -1813,41 +2006,65 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
ief = bme.isEmpty
}()
}
if reflect.PointerTo(t).Implements(typeTextMarshaler) {
defer func() {
// capture encoding method used for modes that disable TextMarshaler
tme := textMarshalerEncoder{
alternateEncode: ef,
alternateIsEmpty: ief,
}
ef = tme.encode
ief = tme.isEmpty
}()
}
if reflect.PointerTo(t).Implements(typeJSONMarshaler) {
defer func() {
// capture encoding method used for modes that don't support transcoding
// from types that implement json.Marshaler.
jme := jsonMarshalerEncoder{
alternateEncode: ef,
alternateIsEmpty: ief,
}
ef = jme.encode
ief = jme.isEmpty
}()
}
switch k {
case reflect.Bool:
return encodeBool, isEmptyBool
return encodeBool, isEmptyBool, getIsZeroFunc(t)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return encodeInt, isEmptyInt
return encodeInt, isEmptyInt, getIsZeroFunc(t)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return encodeUint, isEmptyUint
return encodeUint, isEmptyUint, getIsZeroFunc(t)
case reflect.Float32, reflect.Float64:
return encodeFloat, isEmptyFloat
return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
case reflect.String:
return encodeString, isEmptyString
return encodeString, isEmptyString, getIsZeroFunc(t)
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return encodeByteString, isEmptySlice
return encodeByteString, isEmptySlice, getIsZeroFunc(t)
}
fallthrough
case reflect.Array:
f, _ := getEncodeFunc(t.Elem())
f, _, _ := getEncodeFunc(t.Elem())
if f == nil {
return nil, nil
return nil, nil, nil
}
return arrayEncodeFunc{f: f}.encode, isEmptySlice
return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
case reflect.Map:
f := getEncodeMapFunc(t)
if f == nil {
return nil, nil
return nil, nil, nil
}
return f, isEmptyMap
return f, isEmptyMap, getIsZeroFunc(t)
case reflect.Struct:
// Get struct's special field "_" tag options
@ -1855,31 +2072,31 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
tag := f.Tag.Get("cbor")
if tag != "-" {
if hasToArrayOption(tag) {
return encodeStructToArray, isEmptyStruct
return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
}
}
}
return encodeStruct, isEmptyStruct
return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
case reflect.Interface:
return encodeIntf, isEmptyIntf
return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
}
return nil, nil
return nil, nil, nil
}
func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
for t.Kind() == reflect.Ptr {
for t.Kind() == reflect.Pointer {
t = t.Elem()
}
f, _ := getEncodeFunc(t)
f, _, _ := getEncodeFunc(t)
if f == nil {
return nil
}
return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
for v.Kind() == reflect.Ptr && !v.IsNil() {
for v.Kind() == reflect.Pointer && !v.IsNil() {
v = v.Elem()
}
if v.Kind() == reflect.Ptr && v.IsNil() {
if v.Kind() == reflect.Pointer && v.IsNil() {
e.Write(cborNil)
return nil
}
@ -1987,3 +2204,96 @@ func float32NaNFromReflectValue(v reflect.Value) float32 {
f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
return f32
}
type isZeroer interface {
IsZero() bool
}
var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
// Types that implement `IsZero() bool` are delegated to for non-nil values.
// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
// The returned function matches the behavior of stdlib encoding/json in Go 1.24+.
func getIsZeroFunc(t reflect.Type) isZeroFunc {
// Provide a function that uses a type's IsZero method if defined.
switch {
case t == nil:
return isZeroDefault
case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
return isZeroInterfaceCustom
case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
return isZeroPointerCustom
case t.Implements(isZeroerType):
return isZeroCustom
case reflect.PointerTo(t).Implements(isZeroerType):
return isZeroAddrCustom
default:
return isZeroDefault
}
}
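A minimal sketch of how this dispatch resolves in practice, assuming it lives inside this cbor package; nullableID and exampleIsZeroDispatch are illustrative names, not part of the codec. The type's pointer receiver implements IsZero, so getIsZeroFunc is expected to return isZeroAddrCustom:
// nullableID is a hypothetical type used only to illustrate getIsZeroFunc dispatch.
type nullableID struct {
	set bool
	id  string
}
func (n *nullableID) IsZero() bool { return !n.set }
func exampleIsZeroDispatch() {
	// nullableID itself does not implement isZeroer, but *nullableID does,
	// so the addressable-value path (isZeroAddrCustom) should be selected.
	izf := getIsZeroFunc(reflect.TypeOf(nullableID{}))
	zero, _ := izf(reflect.ValueOf(nullableID{}))
	_ = zero // expected true: the unset value reports itself as zero
}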
// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
// and delegates to the custom IsZero() implementation otherwise.
func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
kind := v.Kind()
switch kind {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
if v.IsNil() {
return true, nil
}
}
switch kind {
case reflect.Interface, reflect.Pointer:
if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
return true, nil
}
}
return v.Interface().(isZeroer).IsZero(), nil
}
// isZeroPointerCustom returns true for nil values,
// and delegates to the custom IsZero() implementation otherwise.
func isZeroPointerCustom(v reflect.Value) (bool, error) {
if v.IsNil() {
return true, nil
}
return v.Interface().(isZeroer).IsZero(), nil
}
// isZeroCustom delegates to the custom IsZero() implementation.
func isZeroCustom(v reflect.Value) (bool, error) {
return v.Interface().(isZeroer).IsZero(), nil
}
// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
func isZeroAddrCustom(v reflect.Value) (bool, error) {
if !v.CanAddr() {
// Temporarily box v so we can take the address.
v2 := reflect.New(v.Type()).Elem()
v2.Set(v)
v = v2
}
return v.Addr().Interface().(isZeroer).IsZero(), nil
}
// isZeroDefault calls reflect.Value#IsZero()
func isZeroDefault(v reflect.Value) (bool, error) {
if !v.IsValid() {
// v is zero value
return true, nil
}
return v.IsZero(), nil
}
// isZeroFieldStruct is used to determine whether to omit toarray structs
func isZeroFieldStruct(v reflect.Value) (bool, error) {
structType, err := getEncodingStructType(v.Type())
if err != nil {
return false, err
}
return len(structType.fields) == 0, nil
}

View File

@ -1,8 +1,6 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//go:build go1.20
package cbor
import (
@ -67,8 +65,8 @@ func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v
}
func getEncodeMapFunc(t reflect.Type) encodeFunc {
kf, _ := getEncodeFunc(t.Key())
ef, _ := getEncodeFunc(t.Elem())
kf, _, _ := getEncodeFunc(t.Key())
ef, _, _ := getEncodeFunc(t.Elem())
if kf == nil || ef == nil {
return nil
}
@ -76,13 +74,13 @@ func getEncodeMapFunc(t reflect.Type) encodeFunc {
kf: kf,
ef: ef,
kpool: sync.Pool{
New: func() interface{} {
New: func() any {
rk := reflect.New(t.Key()).Elem()
return &rk
},
},
vpool: sync.Pool{
New: func() interface{} {
New: func() any {
rv := reflect.New(t.Elem()).Elem()
return &rv
},

View File

@ -1,60 +0,0 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//go:build !go1.20
package cbor
import (
"bytes"
"reflect"
)
type mapKeyValueEncodeFunc struct {
kf, ef encodeFunc
}
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
if kvs == nil {
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
if err := me.kf(e, em, iter.Key()); err != nil {
return err
}
if err := me.ef(e, em, iter.Value()); err != nil {
return err
}
}
return nil
}
initial := e.Len()
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
offset := e.Len()
if err := me.kf(e, em, iter.Key()); err != nil {
return err
}
valueOffset := e.Len()
if err := me.ef(e, em, iter.Value()); err != nil {
return err
}
kvs[i] = keyValue{
offset: offset - initial,
valueOffset: valueOffset - initial,
nextOffset: e.Len() - initial,
}
}
return nil
}
func getEncodeMapFunc(t reflect.Type) encodeFunc {
kf, _ := getEncodeFunc(t.Key())
ef, _ := getEncodeFunc(t.Elem())
if kf == nil || ef == nil {
return nil
}
mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
return mapEncodeFunc{
e: mkv.encodeKeyValues,
}.encode
}

View File

@ -0,0 +1,8 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//go:build go1.24
package cbor
var jsonStdlibSupportsOmitzero = true

View File

@ -0,0 +1,8 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//go:build !go1.24
package cbor
var jsonStdlibSupportsOmitzero = false

View File

@ -1,3 +1,6 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
package cbor
import (
@ -45,6 +48,9 @@ func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
}
// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
if sv == nil {
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
@ -52,6 +58,29 @@ func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
d := decoder{data: data, dm: defaultDecMode}
// Check well-formedness of CBOR data item.
// SimpleValue.UnmarshalCBOR() is exported, so
// the codec needs to support same behavior for:
// - Unmarshal(data, *SimpleValue)
// - SimpleValue.UnmarshalCBOR(data)
err := d.wellformed(false, false)
if err != nil {
return err
}
return sv.unmarshalCBOR(data)
}
// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
// This function assumes data is well-formed, and does not perform bounds checking.
// This function is called by Unmarshal().
func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
if sv == nil {
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
}
d := decoder{data: data, dm: defaultDecMode}
typ, ai, val := d.getHead()
if typ != cborTypePrimitives {

View File

@ -26,7 +26,7 @@ func NewDecoder(r io.Reader) *Decoder {
}
// Decode reads CBOR value and decodes it into the value pointed to by v.
func (dec *Decoder) Decode(v interface{}) error {
func (dec *Decoder) Decode(v any) error {
_, err := dec.readNext()
if err != nil {
// Return validation error or read error.
@ -170,7 +170,7 @@ func NewEncoder(w io.Writer) *Encoder {
}
// Encode writes the CBOR encoding of v.
func (enc *Encoder) Encode(v interface{}) error {
func (enc *Encoder) Encode(v any) error {
if len(enc.indefTypes) > 0 && v != nil {
indefType := enc.indefTypes[len(enc.indefTypes)-1]
if indefType == cborTypeTextString {

View File

@ -18,9 +18,11 @@ type field struct {
typ reflect.Type
ef encodeFunc
ief isEmptyFunc
izf isZeroFunc
typInfo *typeInfo // used by the decoder to reuse type info
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
omitEmpty bool // used to skip empty field
omitZero bool // used to skip zero field
keyAsInt bool // used to encode/decode field name as int
}
@ -157,7 +159,7 @@ func appendFields(
f := t.Field(i)
ft := f.Type
for ft.Kind() == reflect.Ptr {
for ft.Kind() == reflect.Pointer {
ft = ft.Elem()
}
@ -165,9 +167,11 @@ func appendFields(
continue
}
cborTag := true
tag := f.Tag.Get("cbor")
if tag == "" {
tag = f.Tag.Get("json")
cborTag = false
}
if tag == "-" {
continue
@ -177,7 +181,7 @@ func appendFields(
// Parse field tag options
var tagFieldName string
var omitempty, keyasint bool
var omitempty, omitzero, keyasint bool
for j := 0; tag != ""; j++ {
var token string
idx := strings.IndexByte(tag, ',')
@ -192,6 +196,10 @@ func appendFields(
switch token {
case "omitempty":
omitempty = true
case "omitzero":
if cborTag || jsonStdlibSupportsOmitzero {
omitzero = true
}
case "keyasint":
keyasint = true
}
@ -213,6 +221,7 @@ func appendFields(
idx: fIdx,
typ: f.Type,
omitEmpty: omitempty,
omitZero: omitzero,
keyAsInt: keyasint,
tagged: tagged})
} else {
@ -230,8 +239,7 @@ func appendFields(
// a nonexportable anonymous field of struct type.
// Nonexportable anonymous field of struct type can contain exportable fields.
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
exportable := f.PkgPath == ""
return exportable || (f.Anonymous && fk == reflect.Struct)
return f.IsExported() || (f.Anonymous && fk == reflect.Struct)
}
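A hedged illustration of the tag options parsed above; exampleRecord and its fields are invented for this sketch and assume a "time" import. omitempty skips empty values, omitzero skips values that the isZero functions report as zero, and keyasint encodes the field key as an integer. On a json tag, omitzero is honored only when jsonStdlibSupportsOmitzero is true (Go 1.24+ builds):
type exampleRecord struct {
	ID      uint64    `cbor:"1,keyasint"`       // key encoded as the integer 1
	Note    string    `cbor:"note,omitempty"`   // omitted when Note == ""
	Created time.Time `cbor:"created,omitzero"` // omitted when Created.IsZero() reports true
	Legacy  string    `json:"legacy,omitzero"`  // takes effect only on Go 1.24+ builds
}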
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
@ -244,7 +252,7 @@ func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv r
fv = fv.Field(n)
if i < len(idx)-1 {
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
if fv.IsNil() {
// Null pointer to embedded struct field
fv, err = f(fv)

View File

@ -1,3 +1,6 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
package cbor
import (
@ -7,27 +10,54 @@ import (
"sync"
)
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
// unmarshaling of tag content is subject to any encode and decode options that would apply to
// enclosed data item if it were to appear outside of a tag.
// Tag represents a tagged data item (CBOR major type 6), comprising a tag number and the unmarshaled tag content.
// NOTE: The same encoding and decoding options that apply to untagged CBOR data items also apply to tag content
// during encoding and decoding.
type Tag struct {
Number uint64
Content interface{}
Content any
}
// RawTag represents CBOR tag data, including tag number and raw tag content.
// RawTag implements Unmarshaler and Marshaler interfaces.
// RawTag represents a tagged data item (CBOR major type 6), comprising a tag number and the raw tag content.
// The raw tag content (enclosed data item) is a CBOR-encoded data item.
// RawTag can be used to delay decoding of a CBOR data item or to precompute the encoding of a CBOR data item.
type RawTag struct {
Number uint64
Content RawMessage
}
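A sketch of the delayed-decoding use mentioned above; the function name and payload handling are illustrative, and Unmarshal refers to this codec's package-level function. The tag number is read immediately while the enclosed item stays as raw bytes until it is needed:
func exampleRawTagDelayedDecode(data []byte) (uint64, string, error) {
	var rt RawTag
	if err := Unmarshal(data, &rt); err != nil { // data is expected to hold a tagged item
		return 0, "", err
	}
	var content string
	if err := Unmarshal(rt.Content, &content); err != nil { // decode the enclosed item later
		return 0, "", err
	}
	return rt.Number, content, nil
}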
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
// UnmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (t *RawTag) UnmarshalCBOR(data []byte) error {
if t == nil {
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
}
d := decoder{data: data, dm: defaultDecMode}
// Check if data is a well-formed CBOR data item.
// RawTag.UnmarshalCBOR() is exported, so
// the codec needs to support the same behavior for:
// - Unmarshal(data, *RawTag)
// - RawTag.UnmarshalCBOR(data)
err := d.wellformed(false, false)
if err != nil {
return err
}
return t.unmarshalCBOR(data)
}
// unmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
// This function assumes data is well-formed, and does not perform bounds checking.
// This function is called by Unmarshal().
func (t *RawTag) unmarshalCBOR(data []byte) error {
if t == nil {
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
}
// Decoding CBOR null and undefined to cbor.RawTag is a no-op.
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
return nil
@ -193,7 +223,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
if contentType == nil {
return errors.New("cbor: cannot add nil content type to TagSet")
}
for contentType.Kind() == reflect.Ptr {
for contentType.Kind() == reflect.Pointer {
contentType = contentType.Elem()
}
tag, err := newTagItem(opts, contentType, num, nestedNum...)
@ -216,7 +246,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
// Remove removes given tag content type from TagSet.
func (t *syncTagSet) Remove(contentType reflect.Type) {
for contentType.Kind() == reflect.Ptr {
for contentType.Kind() == reflect.Pointer {
contentType = contentType.Elem()
}
t.Lock()

View File

@ -15,7 +15,7 @@
package compiler
import (
yaml "gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
)
// Context contains state of the compiler as it traverses a document.

View File

@ -20,9 +20,9 @@ import (
"os/exec"
"strings"
yaml "go.yaml.in/yaml/v3"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
yaml "gopkg.in/yaml.v3"
extensions "github.com/google/gnostic-models/extensions"
)

View File

@ -20,7 +20,7 @@ import (
"sort"
"strconv"
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/jsonschema"
)

View File

@ -24,7 +24,7 @@ import (
"strings"
"sync"
yaml "gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
)
var verboseReader = false

View File

@ -16,7 +16,7 @@
// of JSON Schemas.
package jsonschema
import "gopkg.in/yaml.v3"
import "go.yaml.in/yaml/v3"
// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.

View File

@ -21,7 +21,7 @@ import (
"io/ioutil"
"strconv"
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
)
// This is a global map of all known Schemas.

View File

@ -17,7 +17,7 @@ package jsonschema
import (
"fmt"
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
)
const indentation = " "

View File

@ -21,7 +21,7 @@ import (
"regexp"
"strings"
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
message := "contains an invalid AdditionalPropertiesItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid NonBodyParameter")
message := "contains an invalid NonBodyParameter"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error)
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid Parameter")
message := "contains an invalid Parameter"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid ParametersItem")
message := "contains an invalid ParametersItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue,
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid ResponseValue")
message := "contains an invalid ResponseValue"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid SchemaItem")
message := "contains an invalid SchemaItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
message := "contains an invalid SecurityDefinitionsItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node {
// always include this required field.
info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
if m.Required != false {
if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node {
// always include this required field.
info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
if m.ReadOnly != false {
if m.ReadOnly {
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
if m.Required != false {
if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.AllowEmptyValue != false {
if m.AllowEmptyValue {
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
if m.Required != false {
if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
}
if m.Deprecated != false {
if m.Deprecated {
info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
}
@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
if m.Required != false {
if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
if m.AllowEmptyValue != false {
if m.AllowEmptyValue {
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
@ -8340,7 +8340,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
if m.ExclusiveMaximum != false {
if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
if m.ExclusiveMinimum != false {
if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
if m.UniqueItems != false {
if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator))
}
if m.ReadOnly != false {
if m.ReadOnly {
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
}
if m.Attribute != false {
if m.Attribute {
info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
}
if m.Wrapped != false {
if m.Wrapped {
info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
}

View File

@ -15,7 +15,7 @@
package openapi_v2
import (
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)

View File

@ -21,7 +21,7 @@ import (
"regexp"
"strings"
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
message := "contains an invalid AdditionalPropertiesItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid AnyOrExpression")
message := "contains an invalid AnyOrExpression"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid CallbackOrReference")
message := "contains an invalid CallbackOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid ExampleOrReference")
message := "contains an invalid ExampleOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid HeaderOrReference")
message := "contains an invalid HeaderOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid LinkOrReference")
message := "contains an invalid LinkOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -3090,7 +3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid ParameterOrReference")
message := "contains an invalid ParameterOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid RequestBodyOrReference")
message := "contains an invalid RequestBodyOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid ResponseOrReference")
message := "contains an invalid ResponseOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid SchemaOrReference")
message := "contains an invalid SchemaOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference")
message := "contains an invalid SecuritySchemeOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}

View File

@ -15,7 +15,7 @@
package openapi_v3
import (
"gopkg.in/yaml.v3"
yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)

View File

@ -6,10 +6,12 @@ import (
)
type safeType struct {
reflect.Type
cfg *frozenConfig
Type reflect.Type
cfg *frozenConfig
}
var _ Type = &safeType{}
func (type2 *safeType) New() interface{} {
return reflect.New(type2.Type).Interface()
}
@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeType) Kind() reflect.Kind {
return type2.Type.Kind()
}
func (type2 *safeType) Len() int {
return type2.Type.Len()
}
func (type2 *safeType) NumField() int {
return type2.Type.NumField()
}
func (type2 *safeType) String() string {
return type2.Type.String()
}
func (type2 *safeType) Elem() Type {
return type2.cfg.Type2(type2.Type.Elem())
}

27
vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

772
vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file
View File

@ -0,0 +1,772 @@
// Package difflib is a partial port of Python difflib module.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// The following class and functions have been ported:
//
// - SequenceMatcher
//
// - unified_diff
//
// - context_diff
//
// Getting unified diffs was the main goal of the port. Keep in mind this code
// is mostly suitable for outputting text differences in a human-friendly way;
// there are no guarantees that generated diffs are consumable by patch(1).
package difflib
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func calculateRatio(matches, length int) float64 {
if length > 0 {
return 2.0 * float64(matches) / float64(length)
}
return 1.0
}
type Match struct {
A int
B int
Size int
}
type OpCode struct {
Tag byte
I1 int
I2 int
J1 int
J2 int
}
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches peoples' eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
a []string
b []string
b2j map[string][]int
IsJunk func(string) bool
autoJunk bool
bJunk map[string]struct{}
matchingBlocks []Match
fullBCount map[string]int
bPopular map[string]struct{}
opCodes []OpCode
}
func NewMatcher(a, b []string) *SequenceMatcher {
m := SequenceMatcher{autoJunk: true}
m.SetSeqs(a, b)
return &m
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
}
// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
m.SetSeq1(a)
m.SetSeq2(b)
}
// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
if &a == &m.a {
return
}
m.a = a
m.matchingBlocks = nil
m.opCodes = nil
}
// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
if &b == &m.b {
return
}
m.b = b
m.matchingBlocks = nil
m.opCodes = nil
m.fullBCount = nil
m.chainB()
}
func (m *SequenceMatcher) chainB() {
// Populate line -> index mapping
b2j := map[string][]int{}
for i, s := range m.b {
indices := b2j[s]
indices = append(indices, i)
b2j[s] = indices
}
// Purge junk elements
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s := range junk {
delete(b2j, s)
}
}
// Purge remaining popular elements
popular := map[string]struct{}{}
n := len(m.b)
if m.autoJunk && n >= 200 {
ntest := n/100 + 1
for s, indices := range b2j {
if len(indices) > ntest {
popular[s] = struct{}{}
}
}
for s := range popular {
delete(b2j, s)
}
}
m.bPopular = popular
m.b2j = b2j
}
func (m *SequenceMatcher) isBJunk(s string) bool {
_, ok := m.bJunk[s]
return ok
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
// CAUTION: stripping common prefix or suffix would be incorrect.
// E.g.,
// ab
// acab
// Longest matching block is "ab", but if common prefix is
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
// strip, so ends up claiming that ab is changed to acab by
// inserting "ca" in the middle. That's minimal but unintuitive:
// "it's obvious" that someone inserted "ac" at the front.
// Windiff ends up at the same place as diff, but by pairing up
// the unique 'b's and then matching the first two 'a's.
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
// during an iteration of the loop, j2len[j] = length of longest
// junk-free match ending with a[i-1] and b[j]
j2len := map[int]int{}
for i := alo; i != ahi; i++ {
// look at all instances of a[i] in b; note that because
// b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len := map[int]int{}
for _, j := range m.b2j[m.a[i]] {
// a[i] matches b[j]
if j < blo {
continue
}
if j >= bhi {
break
}
k := j2len[j-1] + 1
newj2len[j] = k
if k > bestsize {
besti, bestj, bestsize = i-k+1, j-k+1, k
}
}
j2len = newj2len
}
// Extend the best by non-junk elements on each end. In particular,
// "popular" non-junk elements aren't in b2j, which greatly speeds
// the inner loop above, but also means "the best" match so far
// doesn't contain any junk *or* popular non-junk elements.
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
// Now that we have a wholly interesting match (albeit possibly
// empty!), we may as well suck up the matching junk on each
// side of it too. Can't think of a good reason not to, and it
// saves post-processing the (possibly considerable) expense of
// figuring out what to do with it. In the case of an empty
// interesting match, this is clearly the right thing to do,
// because no other kind of match is possible in the regions.
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
return Match{A: besti, B: bestj, Size: bestsize}
}
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
if m.matchingBlocks != nil {
return m.matchingBlocks
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
match := m.findLongestMatch(alo, ahi, blo, bhi)
i, j, k := match.A, match.B, match.Size
if match.Size > 0 {
if alo < i && blo < j {
matched = matchBlocks(alo, i, blo, j, matched)
}
matched = append(matched, match)
if i+k < ahi && j+k < bhi {
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
}
}
return matched
}
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
// matching_blocks list now.
nonAdjacent := []Match{}
i1, j1, k1 := 0, 0, 0
for _, b := range matched {
// Is this block adjacent to i1, j1, k1?
i2, j2, k2 := b.A, b.B, b.Size
if i1+k1 == i2 && j1+k1 == j2 {
// Yes, so collapse them -- this just increases the length of
// the first block by the length of the second, and the first
// block so lengthened remains the block to compare against.
k1 += k2
} else {
// Not adjacent. Remember the first block (k1==0 means it's
// the dummy we started with), and make the second block the
// new block to compare against.
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
i1, j1, k1 = i2, j2, k2
}
}
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
m.matchingBlocks = nonAdjacent
return m.matchingBlocks
}
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
if m.opCodes != nil {
return m.opCodes
}
i, j := 0, 0
matching := m.GetMatchingBlocks()
opCodes := make([]OpCode, 0, len(matching))
for _, m := range matching {
// invariant: we've pumped out correct diffs to change
// a[:i] into b[:j], and the next matching block is
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
// out a diff to change a[i:ai] into b[j:bj], pump out
// the matching block, and move (i,j) beyond the match
ai, bj, size := m.A, m.B, m.Size
tag := byte(0)
if i < ai && j < bj {
tag = 'r'
} else if i < ai {
tag = 'd'
} else if j < bj {
tag = 'i'
}
if tag > 0 {
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
}
i, j = ai+size, bj+size
// the list of matching blocks is terminated by a
// sentinel with size 0
if size > 0 {
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
}
}
m.opCodes = opCodes
return m.opCodes
}
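A small usage sketch of the tags documented above; the function is illustrative and the expected opcodes are stated informally, not asserted. Replacing "b" with "x" between otherwise identical sequences should yield an equal, replace, equal pattern:
func exampleOpCodes() {
	m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "x", "c"})
	for _, op := range m.GetOpCodes() {
		// Roughly: e a[0:1] b[0:1], r a[1:2] b[1:2], e a[2:3] b[2:3]
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}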
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if n < 0 {
n = 3
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
group := []OpCode{}
for _, c := range codes {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
groups = append(groups, group)
}
return groups
}
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
matches := 0
for _, m := range m.GetMatchingBlocks() {
matches += m.Size
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
// viewing a and b as multisets, set matches to the cardinality
// of their intersection; this counts the number of matches
// without regard to order, so is clearly an upper bound
if m.fullBCount == nil {
m.fullBCount = map[string]int{}
for _, s := range m.b {
m.fullBCount[s] = m.fullBCount[s] + 1
}
}
// avail[x] is the number of times x appears in 'b' less the
// number of times we've seen it in 'a' so far ... kinda
avail := map[string]int{}
matches := 0
for _, s := range m.a {
n, ok := avail[s]
if !ok {
n = m.fullBCount[s]
}
avail[s] = n - 1
if n > 0 {
matches += 1
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
}
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
// Unified diff parameters
type UnifiedDiff struct {
A []string // First sequence lines
FromFile string // First file name
FromDate string // First file time
B []string // Second sequence lines
ToFile string // Second file name
ToDate string // Second file time
Eol string // Headers end of line, defaults to LF
Context int // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
ws := func(s string) error {
_, err := buf.WriteString(s)
return err
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
if err != nil {
return err
}
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
if err != nil {
return err
}
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
if err := ws(" " + line); err != nil {
return err
}
}
continue
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
if err := ws("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
if err := ws("+" + line); err != nil {
return err
}
}
}
}
}
return nil
}
// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
}
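// exampleUnifiedDiff is a minimal usage sketch of the parameters documented
// above: SplitLines keeps trailing newlines, so the default Eol ("\n")
// yields output suitable for writing back out line by line.
func exampleUnifiedDiff() (string, error) {
	diff := UnifiedDiff{
		A:        SplitLines("one\ntwo\nthree\n"),
		B:        SplitLines("one\ntwo\nthrees\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	return GetUnifiedDiffString(diff)
}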
// Convert range to the "ed" format.
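// For example, [0,3) renders as "1,3" (first and last line, inclusive) and
// [4,5) renders as "5".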
func formatRangeContext(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
if length <= 1 {
return fmt.Sprintf("%d", beginning)
}
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
wf := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
ws := func(s string) {
_, err := buf.WriteString(s)
if diffErr == nil && err != nil {
diffErr = err
}
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
prefix := map[byte]string{
'i': "+ ",
'd': "- ",
'r': "! ",
'e': " ",
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
}
}
first, last := g[0], g[len(g)-1]
ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
wf("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
if cc.Tag == 'i' {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
range2 := formatRangeContext(first.J1, last.J2)
wf("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
if cc.Tag == 'd' {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
}
return diffErr
}
// Like WriteContextDiff but returns the diff as a string.
func GetContextDiffString(diff ContextDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteContextDiff(w, diff)
return string(w.Bytes()), err
}
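// exampleContextDiff is the analogous sketch for the context format; since
// ContextDiff shares the UnifiedDiff fields, only the writer function differs.
func exampleContextDiff(a, b []string) (string, error) {
	return GetContextDiffString(ContextDiff{
		A:        a,
		B:        b,
		FromFile: "original",
		ToFile:   "current",
		Context:  3,
	})
}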
// Split a string on "\n" while preserving the newlines. The output can be used
// as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
lines := strings.SplitAfter(s, "\n")
lines[len(lines)-1] += "\n"
return lines
}

50
vendor/go.yaml.in/yaml/v3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,50 @@
This project is covered by two different licenses: MIT and Apache.
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright starting in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,5 +1,4 @@
/*
Copyright The Kubernetes Authors.
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -12,16 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// IPAddressListerExpansion allows custom methods to be added to
// IPAddressLister.
type IPAddressListerExpansion interface{}
// ServiceCIDRListerExpansion allows custom methods to be added to
// ServiceCIDRLister.
type ServiceCIDRListerExpansion interface{}

171
vendor/go.yaml.in/yaml/v3/README.md generated vendored Normal file
View File

@ -0,0 +1,171 @@
go.yaml.in/yaml
===============
YAML Support for the Go Language
## Introduction
The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
and decode [YAML](https://yaml.org/) values.
It was originally developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
parse and generate YAML data quickly and reliably.
## Project Status
This project started as a fork of the extremely popular [go-yaml](
https://github.com/go-yaml/yaml/)
project, and is being maintained by the official [YAML organization](
https://github.com/yaml/).
The YAML team took over ongoing maintenance and development of the project after
discussion with go-yaml's author, @niemeyer, following his decision to
[label the project repository as "unmaintained"](
https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
We have put together a team of dedicated maintainers including representatives
of go-yaml's most important downstream projects.
We will strive to earn the trust of the various go-yaml forks to switch back to
this repository as their upstream.
Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
would like to contribute or be involved.
## Compatibility
The `yaml` package supports most of YAML 1.2, but preserves some behavior from
1.1 for backwards compatibility.
Specifically, v3 of the `yaml` package:
* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
decoded into a typed bool value.
Otherwise they behave as a string (see the sketch after this list).
Booleans in YAML 1.2 are `true`/`false` only.
* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
`0o777` as specified in YAML 1.2, because most parsers still use the old
format.
Octals in the `0o777` format are supported though, so new files work.
* Does not support base-60 floats.
These are gone from YAML 1.2, and were actually never supported by this
package as it's clearly a poor choice.
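For example, a minimal sketch of the 1.1 boolean rule above (the struct and
key names are illustrative):
```go
package main
import (
	"fmt"
	"go.yaml.in/yaml/v3"
)
func main() {
	var typed struct {
		Flag bool `yaml:"flag"`
	}
	var loose map[string]interface{}
	doc := []byte("flag: yes\n")
	// Decoded into a typed bool, the 1.1 literal "yes" becomes true.
	_ = yaml.Unmarshal(doc, &typed)
	// Decoded into an untyped map, it stays the string "yes".
	_ = yaml.Unmarshal(doc, &loose)
	fmt.Println(typed.Flag, loose["flag"]) // true yes
}
```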
## Installation and Usage
The import path for the package is *go.yaml.in/yaml/v3*.
To install it, run:
```bash
go get go.yaml.in/yaml/v3
```
## API Documentation
See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
## API Stability
The package API for yaml v3 will remain stable as described in [gopkg.in](
https://gopkg.in).
## Example
```go
package main
import (
"fmt"
"log"
"go.yaml.in/yaml/v3"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```
## License
The yaml package is licensed under the MIT and Apache License 2.0 licenses.
Please see the LICENSE file for details.

747
vendor/go.yaml.in/yaml/v3/apic.go generated vendored Normal file
View File

@ -0,0 +1,747 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
best_width: -1,
}
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
// Create ALIAS.
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
*event = yaml_event_t{
typ: yaml_ALIAS_EVENT,
anchor: anchor,
}
return true
}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

1018
vendor/go.yaml.in/yaml/v3/decode.go generated vendored Normal file

File diff suppressed because it is too large

2054
vendor/go.yaml.in/yaml/v3/emitterc.go generated vendored Normal file

File diff suppressed because it is too large

577
vendor/go.yaml.in/yaml/v3/encode.go generated vendored Normal file
View File

@ -0,0 +1,577 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
indent int
doneInit bool
}
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
if e.indent == 0 {
e.indent = 4
}
e.emitter.best_indent = e.indent
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
var node *Node
if in.IsValid() {
node, _ = in.Interface().(*Node)
}
if node != nil && node.Kind == DocumentNode {
e.nodev(in)
} else {
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
tag = shortTag(tag)
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
switch value := iface.(type) {
case *Node:
e.nodev(in)
return
case Node:
if !in.CanAddr() {
var n = reflect.New(in.Type()).Elem()
n.Set(in)
in = n
}
e.nodev(in.Addr())
return
case time.Time:
e.timev(tag, in)
return
case *time.Time:
e.timev(tag, in.Elem())
return
case time.Duration:
e.stringv(tag, reflect.ValueOf(value.String()))
return
case Marshaler:
v, err := value.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
e.marshal(tag, reflect.ValueOf(v))
return
case encoding.TextMarshaler:
text, err := value.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
e.marshal(tag, in.Elem())
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice, reflect.Array:
e.slicev(tag, in)
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.intv(tag, in)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
for _, num := range index {
for {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
continue
}
break
}
v = v.Field(num)
}
return v
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = e.fieldByIndex(in, info.Inline)
if !value.IsValid() {
continue
}
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
// isOldBool returns whether s is bool notation as defined in YAML 1.1.
//
// We continue to force strings that YAML 1.1 would interpret as booleans to be
// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
// parsing.
func isOldBool(s string) (result bool) {
switch s {
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
"n", "N", "no", "No", "NO", "off", "Off", "OFF":
return true
default:
return false
}
}
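// exampleQuotedOldBool is a minimal sketch of the effect described above:
// strings that YAML 1.1 would read as booleans (or base-60 floats) are
// emitted quoted, so an old parser cannot reinterpret them as another type.
func exampleQuotedOldBool() ([]byte, error) {
	// Expected to render the value quoted, e.g. `flag: "yes"`, rather than
	// as a bare scalar that a YAML 1.1 parser would decode as a boolean.
	return Marshal(map[string]string{"flag": "yes"})
}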
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
if e.flow {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else {
style = yaml_LITERAL_SCALAR_STYLE
}
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
implicit := tag == ""
if !implicit {
tag = longTag(tag)
}
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.event.head_comment = head
e.event.line_comment = line
e.event.foot_comment = foot
e.event.tail_comment = tail
e.emit()
}
func (e *encoder) nodev(in reflect.Value) {
e.node(in.Interface().(*Node), "")
}
func (e *encoder) node(node *Node, tail string) {
// Zero nodes behave as nil.
if node.Kind == 0 && node.IsZero() {
e.nilv()
return
}
// If the tag was not explicitly requested, and dropping it won't change the
// implicit tag of the value, don't include it in the presentation.
var tag = node.Tag
var stag = shortTag(tag)
var forceQuoting bool
if tag != "" && node.Style&TaggedStyle == 0 {
if node.Kind == ScalarNode {
if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
tag = ""
} else {
rtag, _ := resolve("", node.Value)
if rtag == stag {
tag = ""
} else if stag == strTag {
tag = ""
forceQuoting = true
}
}
} else {
var rtag string
switch node.Kind {
case MappingNode:
rtag = mapTag
case SequenceNode:
rtag = seqTag
}
if rtag == stag {
tag = ""
}
}
}
switch node.Kind {
case DocumentNode:
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
yaml_document_end_event_initialize(&e.event, true)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case SequenceNode:
style := yaml_BLOCK_SEQUENCE_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case MappingNode:
style := yaml_BLOCK_MAPPING_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
e.event.tail_comment = []byte(tail)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
// The tail logic below moves the foot comment of prior keys to the following key,
// since the value for each key may be a nested structure and the foot needs to be
// processed only after the entirety of the value is streamed. The last tail is processed
// with the mapping end event.
var tail string
for i := 0; i+1 < len(node.Content); i += 2 {
k := node.Content[i]
foot := k.FootComment
if foot != "" {
kopy := *k
kopy.FootComment = ""
k = &kopy
}
e.node(k, tail)
tail = foot
v := node.Content[i+1]
e.node(v, "")
}
yaml_mapping_end_event_initialize(&e.event)
e.event.tail_comment = []byte(tail)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case AliasNode:
yaml_alias_event_initialize(&e.event, []byte(node.Value))
e.event.head_comment = []byte(node.HeadComment)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case ScalarNode:
value := node.Value
if !utf8.ValidString(value) {
if stag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if stag != "" {
failf("cannot marshal invalid UTF-8 data as %s", stag)
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
value = encodeBase64(value)
}
style := yaml_PLAIN_SCALAR_STYLE
switch {
case node.Style&DoubleQuotedStyle != 0:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
case node.Style&SingleQuotedStyle != 0:
style = yaml_SINGLE_QUOTED_SCALAR_STYLE
case node.Style&LiteralStyle != 0:
style = yaml_LITERAL_SCALAR_STYLE
case node.Style&FoldedStyle != 0:
style = yaml_FOLDED_SCALAR_STYLE
case strings.Contains(value, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case forceQuoting:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
default:
failf("cannot encode node with unknown kind %d", node.Kind)
}
}

1274
vendor/go.yaml.in/yaml/v3/parserc.go generated vendored Normal file

File diff suppressed because it is too large

434
vendor/go.yaml.in/yaml/v3/readerc.go generated vendored Normal file
View File

@ -0,0 +1,434 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// that to be the case, and there are tests that rely on it.
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is either a) panicking or b) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length would either panic (in Go) or access invalid memory (in C).
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
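// For illustration, a minimal sketch of the UTF-8 decoding steps above applied
// to the three-byte sequence 0xE2 0x82 0xAC (U+20AC); the concrete bytes are an
// assumption chosen for this example, not data produced by the parser:
//
// octet := byte(0xE2)                    // 1110xxxx, so width is 3
// value := rune(octet & 0x0F)            // 0x02
// value = (value << 6) + rune(0x82&0x3F) // trailing octet; value is now 0x82
// value = (value << 6) + rune(0xAC&0x3F) // trailing octet; value is now 0x20AC
// // width == 3 and value >= 0x800, and the value is neither a surrogate nor
// // above 0x10FFFF, so the character is accepted and re-encoded into
// // parser.buffer as the same three bytes.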

326
vendor/go.yaml.in/yaml/v3/resolve.go generated vendored Normal file
View File

@ -0,0 +1,326 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"time"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, boolTag, []string{"true", "True", "TRUE"}},
{false, boolTag, []string{"false", "False", "FALSE"}},
{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", mergeTag, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const (
nullTag = "!!null"
boolTag = "!!bool"
strTag = "!!str"
intTag = "!!int"
floatTag = "!!float"
timestampTag = "!!timestamp"
seqTag = "!!seq"
mapTag = "!!map"
binaryTag = "!!binary"
mergeTag = "!!merge"
)
var longTags = make(map[string]string)
var shortTags = make(map[string]string)
func init() {
for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
ltag := longTag(stag)
longTags[stag] = ltag
shortTags[ltag] = stag
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
if strings.HasPrefix(tag, longTagPrefix) {
if stag, ok := shortTags[tag]; ok {
return stag
}
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
if ltag, ok := longTags[tag]; ok {
return ltag
}
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
tag = shortTag(tag)
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, strTag, binaryTag:
return
case floatTag:
if rtag == intTag {
switch v := out.(type) {
case int64:
rtag = floatTag
out = float64(v)
return
case int:
rtag = floatTag
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != strTag && tag != binaryTag {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return floatTag, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == timestampTag {
t, ok := parseTimestamp(in)
if ok {
return timestampTag, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return intTag, uintv
}
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return floatTag, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
// Octals as introduced in version 1.2 of the spec.
// Octals from the 1.1 spec, spelled as 0777, are still
// decoded by default in v3 as well for compatibility.
// May be dropped in v4 depending on how usage evolves.
if strings.HasPrefix(plain, "0o") {
intv, err := strconv.ParseInt(plain[2:], 8, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 8, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0o") {
intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
default:
panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
}
}
return strTag, in
}
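// For illustration, a rough sketch of the values resolve is expected to produce
// for a few plain scalars; the expected outputs are assumptions read off the
// logic above rather than upstream test data:
//
// resolve("", "true")   // boolTag,  true
// resolve("", "0x1F")   // intTag,   31
// resolve("", "0o777")  // intTag,   511
// resolve("", "1.5")    // floatTag, 1.5
// resolve("", "banana") // strTag,   "banana"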
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
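// For illustration, a minimal sketch of encodeBase64's wrapping behavior; the
// lengths are assumptions derived from lineLen above:
//
// encodeBase64("abc") // "YWJj": fits on one line, no newline appended
// encodeBase64(strings.Repeat("x", 100))
// // encodes to 136 base64 characters, which are emitted as a 70-character
// // line and a 66-character line, each followed by '\n'.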
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and returns the parsed
// timestamp, reporting whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
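// For illustration, a rough sketch of parseTimestamp on sample inputs; the
// expected results are assumptions inferred from the format list above:
//
// parseTimestamp("2001-12-14")          // ok: date only
// parseTimestamp("2001-12-14 21:59:43") // ok: space separated, no time zone
// parseTimestamp("14-12-2001")          // not ok: the quick check requires "YYYY-"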

3040
vendor/go.yaml.in/yaml/v3/scannerc.go generated vendored Normal file

File diff suppressed because it is too large

134
vendor/go.yaml.in/yaml/v3/sorter.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
digits := false
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
digits = unicode.IsDigit(ar[i])
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
if digits {
return al
} else {
return bl
}
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
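// For illustration, a rough sketch of the ordering the comparison above is
// meant to produce when sorting map keys; the examples are assumptions drawn
// from the digit-run handling, not upstream tests:
//
// "a2" sorts before "a10"   // digit runs compare numerically, not lexically
// "a10" sorts before "a10b" // on a common prefix, the shorter key wins
// 2 sorts before "2"        // non-string kinds order ahead of strings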
// keyFloat returns a float value for v if it is a number or bool,
// and reports whether it is a number or bool.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}

48
vendor/go.yaml.in/yaml/v3/writerc.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}

703
vendor/go.yaml.in/yaml/v3/yaml.go generated vendored Normal file
View File

@ -0,0 +1,703 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/yaml/go-yaml
package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"unicode/utf8"
)
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document.
type Unmarshaler interface {
UnmarshalYAML(value *Node) error
}
type obsoleteUnmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
parser *parser
knownFields bool
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// KnownFields ensures that the keys in decoded mappings
// exist as fields in the struct being decoded into.
func (dec *Decoder) KnownFields(enable bool) {
dec.knownFields = enable
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder()
d.knownFields = dec.knownFields
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
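// For illustration, a minimal usage sketch of Decoder with strict field
// checking; the Config type and the input document are assumptions, not part
// of this package:
//
// type Config struct {
//     Name string `yaml:"name"`
// }
// dec := NewDecoder(strings.NewReader("name: demo\nextra: 1\n"))
// dec.KnownFields(true)
// var cfg Config
// err := dec.Decode(&cfg) // expected to fail: "extra" is not a field of Config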
// Decode decodes the node and stores its data into the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (n *Node) Decode(v interface{}) (err error) {
d := newDecoder()
defer handleErr(&err)
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(n, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be excluded if IsZero returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
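// For illustration, a rough sketch of the flow and inline flags described
// above; the types and the expected output are assumptions, not upstream
// examples:
//
// type Inner struct{ A, B int }
// type Outer struct {
//     List  []int `yaml:"list,flow"`
//     Inner `yaml:",inline"`
// }
// out, _ := Marshal(Outer{List: []int{1, 2}, Inner: Inner{A: 3, B: 4}})
// // Expected roughly: "list: [1, 2]\na: 3\nb: 4\n"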
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// Encode encodes value v and stores its representation in n.
//
// See the documentation for Marshal for details about the
// conversion of Go values into YAML.
func (n *Node) Encode(v interface{}) (err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(v))
e.finish()
p := newParser(e.out)
p.textless = true
defer p.destroy()
doc := p.parse()
*n = *doc.Content[0]
return nil
}
// SetIndent changes the indentation used when encoding.
func (e *Encoder) SetIndent(spaces int) {
if spaces < 0 {
panic("yaml: cannot indent to a negative number of spaces")
}
e.encoder.indent = spaces
}
// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
func (e *Encoder) CompactSeqIndent() {
e.encoder.emitter.compact_sequence_indent = true
}
// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
func (e *Encoder) DefaultSeqIndent() {
e.encoder.emitter.compact_sequence_indent = false
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
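// For illustration, a minimal sketch of the Encoder knobs above; the expected
// output is an assumption, not captured from a run:
//
// var buf bytes.Buffer
// enc := NewEncoder(&buf)
// enc.SetIndent(2)
// enc.CompactSeqIndent() // "- " counts as part of the indentation
// _ = enc.Encode(map[string][]string{"items": {"a", "b"}})
// _ = enc.Close()
// // Expected roughly:
// // items:
// // - a
// // - b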
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
type Kind uint32
const (
DocumentNode Kind = 1 << iota
SequenceNode
MappingNode
ScalarNode
AliasNode
)
type Style uint32
const (
TaggedStyle Style = 1 << iota
DoubleQuotedStyle
SingleQuotedStyle
LiteralStyle
FoldedStyle
FlowStyle
)
// Node represents an element in the YAML document hierarchy. While documents
// are typically encoded and decoded into higher level types, such as structs
// and maps, Node is an intermediate representation that allows detailed
// control over the content being decoded or encoded.
//
// It's worth noting that although Node offers access into details such as
// line numbers, columns, and comments, the content when re-encoded will not
// have its original textual representation preserved. An effort is made to
// render the data pleasantly, and to preserve comments near the data they
// describe, though.
//
// Values that make use of the Node type interact with the yaml package in the
// same way any other type would do, by encoding and decoding yaml data
// directly or indirectly into them.
//
// For example:
//
// var person struct {
// Name string
// Address yaml.Node
// }
// err := yaml.Unmarshal(data, &person)
//
// Or by itself:
//
// var person Node
// err := yaml.Unmarshal(data, &person)
type Node struct {
// Kind defines whether the node is a document, a mapping, a sequence,
// a scalar value, or an alias to another node. The specific data type of
// scalar nodes may be obtained via the ShortTag and LongTag methods.
Kind Kind
// Style allows customizing the appearance of the node in the tree.
Style Style
// Tag holds the YAML tag defining the data type for the value.
// When decoding, this field will always be set to the resolved tag,
// even when it wasn't explicitly provided in the YAML content.
// When encoding, if this field is unset the value type will be
// implied from the node properties, and if it is set, it will only
// be serialized into the representation if TaggedStyle is used or
// the implicit tag diverges from the provided one.
Tag string
// Value holds the unescaped and unquoted representation of the value.
Value string
// Anchor holds the anchor name for this node, which allows aliases to point to it.
Anchor string
// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
Alias *Node
// Content holds contained nodes for documents, mappings, and sequences.
Content []*Node
// HeadComment holds any comments in the lines preceding the node and
// not separated by an empty line.
HeadComment string
// LineComment holds any comments at the end of the line where the node is in.
LineComment string
// FootComment holds any comments following the node and before empty lines.
FootComment string
// Line and Column hold the node position in the decoded YAML text.
// These fields are not respected when encoding the node.
Line int
Column int
}
// IsZero returns whether the node has all of its fields unset.
func (n *Node) IsZero() bool {
return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
}
// LongTag returns the long form of the tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) LongTag() string {
return longTag(n.ShortTag())
}
// ShortTag returns the short form of the YAML tag that indicates data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) ShortTag() string {
if n.indicatedString() {
return strTag
}
if n.Tag == "" || n.Tag == "!" {
switch n.Kind {
case MappingNode:
return mapTag
case SequenceNode:
return seqTag
case AliasNode:
if n.Alias != nil {
return n.Alias.ShortTag()
}
case ScalarNode:
tag, _ := resolve("", n.Value)
return tag
case 0:
// Special case to make the zero value convenient.
if n.IsZero() {
return nullTag
}
}
return ""
}
return shortTag(n.Tag)
}
func (n *Node) indicatedString() bool {
return n.Kind == ScalarNode &&
(shortTag(n.Tag) == strTag ||
(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
}
// SetString is a convenience function that sets the node to a string value
// and defines its style in a pleasant way depending on its content.
func (n *Node) SetString(s string) {
n.Kind = ScalarNode
if utf8.ValidString(s) {
n.Value = s
n.Tag = strTag
} else {
n.Value = encodeBase64(s)
n.Tag = binaryTag
}
if strings.Contains(n.Value, "\n") {
n.Style = LiteralStyle
}
}
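// For illustration, a minimal sketch of decoding into a Node to inspect the
// document structure; the input and the expected field values are assumptions:
//
// var n Node
// _ = Unmarshal([]byte("port: 8080\n"), &n)
// doc := n.Content[0]                        // the top-level mapping node
// key, val := doc.Content[0], doc.Content[1] // keys and values alternate in Content
// // Expected roughly: key.Value == "port", val.Value == "8080",
// // val.ShortTag() == "!!int"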
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
// InlineUnmarshalers holds indexes to inlined fields that
// contain unmarshaler values.
InlineUnmarshalers [][]int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
var unmarshalerType reflect.Type
func init() {
var v Unmarshaler
unmarshalerType = reflect.ValueOf(&v).Elem().Type()
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
inlineUnmarshalers := [][]int(nil)
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct, reflect.Ptr:
ftype := field.Type
for ftype.Kind() == reflect.Ptr {
ftype = ftype.Elem()
}
if ftype.Kind() != reflect.Struct {
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
if reflect.PtrTo(ftype).Implements(unmarshalerType) {
inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
} else {
sinfo, err := getStructInfo(ftype)
if err != nil {
return nil, err
}
for _, index := range sinfo.InlineUnmarshalers {
inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
}
default:
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
InlineUnmarshalers: inlineUnmarshalers,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
func isZero(v reflect.Value) bool {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
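// For illustration, a rough sketch of how omitempty interacts with the IsZeroer
// check in isZero above; the Window and Spec types are assumptions:
//
// type Window struct{ Min, Max int }
// func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }
//
// type Spec struct {
//     W Window `yaml:"w,omitempty"`
// }
// // Marshal(Spec{}) is expected to omit "w" entirely, because Window{}
// // reports IsZero() == true.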

811
vendor/go.yaml.in/yaml/v3/yamlh.go generated vendored Normal file
View File

@ -0,0 +1,811 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"fmt"
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
yaml_TAIL_COMMENT_EVENT
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
yaml_TAIL_COMMENT_EVENT: "tail comment",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
//
// yaml_parser_set_input().
//
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
newlines int // The number of line breaks since last non-break/non-blank character
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Comments
head_comment []byte // The current head comments
line_comment []byte // The current line comments
foot_comment []byte // The current foot comments
tail_comment []byte // Foot comment that happens at the end of a block.
stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
comments []yaml_comment_t // The folded comments for all parsed tokens
comments_head int
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
type yaml_comment_t struct {
scan_mark yaml_mark_t // Position where scanning for comments started
token_mark yaml_mark_t // Position after which tokens will be associated with this comment
start_mark yaml_mark_t // Position of '#' comment mark
end_mark yaml_mark_t // Position where comment terminated
head []byte
line []byte
foot []byte
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write all of the bytes in
// buffer to the output.
//
// emitter is the emitter whose output is being flushed, and buffer holds the
// bytes to be written.
//
// On success the handler should return nil; if the handler fails, it should
// return an error describing the problem.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
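// A minimal sketch (editorial illustration, not part of the upstream file): a
// write handler that appends the emitter's flushed bytes to an in-memory
// buffer, which is essentially what the library does when output is directed
// to a string rather than an io.Writer.
func example_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	// output_buffer points at the caller-owned byte slice; appending here
	// accumulates everything the emitter flushes.
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}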
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
space_above bool // Is there an empty line above?
foot_indent int // The indent used to write the foot comment above, or -1 if none.
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
key_line_comment []byte
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}

198
vendor/go.yaml.in/yaml/v3/yamlprivateh.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return (
// is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return (
// is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return (
// is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
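// Editorial sketch (not part of the upstream file): width classifies the first
// byte of a UTF-8 sequence by its high bits, so a hypothetical helper that
// walks a buffer character by character advances by width rather than by
// single bytes.
func example_count_runes(b []byte) int {
	n := 0
	for i := 0; i < len(b); {
		w := width(b[i])
		if w == 0 {
			// Invalid leading byte; step over it so the walk still terminates.
			w = 1
		}
		i += w
		n++
	}
	return n
}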

File diff suppressed because it is too large

View File

@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1beta1";
// ApplyConfiguration defines the desired configuration values of an object.
message ApplyConfiguration {
// expression will be evaluated by CEL to create an apply configuration.
// ref: https://github.com/google/cel-spec
//
// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
// returns an apply configuration to set a single field:
//
// Object{
// spec: Object.spec{
// serviceAccountName: "example"
// }
// }
//
// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
// values not included in the apply configuration.
//
// CEL expressions have access to the object types needed to create apply configurations:
//
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
// object. No other metadata properties are accessible.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
optional string expression = 1;
}
// AuditAnnotation describes how to produce an audit annotation for an API request.
message AuditAnnotation {
// key specifies the audit annotation key. The audit annotation keys of
@ -79,6 +124,75 @@ message ExpressionWarning {
optional string warning = 3;
}
// JSONPatch defines a JSON Patch.
message JSONPatch {
// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
// ref: https://github.com/google/cel-spec
//
// expression must return an array of JSONPatch values.
//
// For example, this CEL expression returns a JSON patch to conditionally modify a value:
//
// [
// JSONPatch{op: "test", path: "/spec/example", value: "Red"},
// JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
// ]
//
// To define an object for the patch value, use Object types. For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/spec/selector",
// value: Object.spec.selector{matchLabels: {"environment": "test"}}
// }
// ]
//
// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
// value: "test"
// },
// ]
//
// CEL expressions have access to the types needed to create JSON patches and objects:
//
// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
// See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
// integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
// [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
// function may be used to escape path keys containing '/' and '~'.
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
// as well as:
//
// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
optional string expression = 1;
}
// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
message MatchCondition {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@ -203,6 +317,173 @@ message MatchResources {
optional string matchPolicy = 7;
}
// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
message MutatingAdmissionPolicy {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Specification of the desired behavior of the MutatingAdmissionPolicy.
optional MutatingAdmissionPolicySpec spec = 2;
}
// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
// configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
//
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
message MutatingAdmissionPolicyBinding {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
optional MutatingAdmissionPolicyBindingSpec spec = 2;
}
// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
message MutatingAdmissionPolicyBindingList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of PolicyBinding.
repeated MutatingAdmissionPolicyBinding items = 2;
}
// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
message MutatingAdmissionPolicyBindingSpec {
// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
// If the referenced resource does not exist, this binding is considered invalid and will be ignored.
// Required.
optional string policyName = 1;
// paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
optional ParamRef paramRef = 2;
// matchResources limits what resources match this binding and may be mutated by it.
// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
// matchConditions before the resource may be mutated.
// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
// and matchConditions must match for the resource to be mutated.
// Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
// '*' matches CREATE, UPDATE and CONNECT.
// +optional
optional MatchResources matchResources = 3;
}
// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
message MutatingAdmissionPolicyList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of ValidatingAdmissionPolicy.
repeated MutatingAdmissionPolicy items = 2;
}
// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
message MutatingAdmissionPolicySpec {
// paramKind specifies the kind of resources used to parameterize this policy.
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
// +optional
optional ParamKind paramKind = 1;
// matchConstraints specifies what resources this policy is designed to validate.
// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
// '*' matches CREATE, UPDATE and CONNECT.
// Required.
optional MatchResources matchConstraints = 2;
// variables contain definitions of variables that can be used in composition of other expressions.
// Each variable is defined as a named CEL expression.
// The variables defined here will be available under `variables` in other expressions of the policy
// except matchConditions because matchConditions are evaluated before the rest of the policy.
//
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
// Thus, variables must be sorted by the order of first appearance and acyclic.
// +listType=atomic
// +optional
repeated Variable variables = 3;
// mutations contain operations to perform on matching objects.
// mutations may not be empty; a minimum of one mutation is required.
// mutations are evaluated in order, and are reinvoked according to
// the reinvocationPolicy.
// The mutations of a policy are invoked for each binding of this policy
// and reinvocation of mutations occurs on a per binding basis.
//
// +listType=atomic
// +optional
repeated Mutation mutations = 4;
// failurePolicy defines how to handle failures for the admission policy. Failures can
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
// or mis-configured policy definitions or bindings.
//
// A policy is invalid if paramKind refers to a non-existent Kind.
// A binding is invalid if paramRef.name refers to a non-existent resource.
//
// failurePolicy does not define how validations that evaluate to false are handled.
//
// Allowed values are Ignore or Fail. Defaults to Fail.
// +optional
optional string failurePolicy = 5;
// matchConditions is a list of conditions that must be met for a request to be validated.
// Match conditions filter requests that have already been matched by the matchConstraints.
// An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// If a parameter object is provided, it can be accessed via the `params` handle in the same
// manner as validation expressions.
//
// The exact matching logic is (in order):
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
// 3. If any matchCondition evaluates to an error (but none are FALSE):
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the policy is skipped
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +optional
repeated MatchCondition matchConditions = 6;
// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
// as part of a single admission evaluation.
// Allowed values are "Never" and "IfNeeded".
//
// Never: These mutations will not be called more than once per binding in a single admission evaluation.
//
// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
// reinvoked when mutations change the object after this mutation is invoked.
// Required.
optional string reinvocationPolicy = 7;
}
// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
message MutatingWebhook {
// The name of the admission webhook.
@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList {
repeated MutatingWebhookConfiguration items = 2;
}
// Mutation specifies the CEL expression which is used to apply the Mutation.
message Mutation {
// patchType indicates the patch strategy used.
// Allowed values are "ApplyConfiguration" and "JSONPatch".
// Required.
//
// +unionDiscriminator
optional string patchType = 2;
// applyConfiguration defines the desired configuration values of an object.
// The configuration is applied to the admission object using
// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
// A CEL expression is used to create apply configuration.
optional ApplyConfiguration applyConfiguration = 3;
// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
// A CEL expression is used to create the JSON patch.
optional JSONPatch jsonPatch = 4;
}
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
message NamedRuleWithOperations {

View File

@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ValidatingAdmissionPolicyList{},
&ValidatingAdmissionPolicyBinding{},
&ValidatingAdmissionPolicyBindingList{},
&MutatingAdmissionPolicy{},
&MutatingAdmissionPolicyList{},
&MutatingAdmissionPolicyBinding{},
&MutatingAdmissionPolicyBindingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View File

@ -1073,7 +1073,7 @@ type MutatingWebhook struct {
}
// ReinvocationPolicyType specifies what type of policy the admission hook uses.
type ReinvocationPolicyType string
type ReinvocationPolicyType = v1.ReinvocationPolicyType
const (
// NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
@ -1197,3 +1197,332 @@ type MatchCondition struct {
// Required.
Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
type MutatingAdmissionPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the MutatingAdmissionPolicy.
Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
type MutatingAdmissionPolicyList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ValidatingAdmissionPolicy.
Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
type MutatingAdmissionPolicySpec struct {
// paramKind specifies the kind of resources used to parameterize this policy.
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
// +optional
ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
// matchConstraints specifies what resources this policy is designed to validate.
// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
// '*' matches CREATE, UPDATE and CONNECT.
// Required.
MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
// variables contain definitions of variables that can be used in composition of other expressions.
// Each variable is defined as a named CEL expression.
// The variables defined here will be available under `variables` in other expressions of the policy
// except matchConditions because matchConditions are evaluated before the rest of the policy.
//
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
// Thus, variables must be sorted by the order of first appearance and acyclic.
// +listType=atomic
// +optional
Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
// mutations contain operations to perform on matching objects.
// mutations may not be empty; a minimum of one mutation is required.
// mutations are evaluated in order, and are reinvoked according to
// the reinvocationPolicy.
// The mutations of a policy are invoked for each binding of this policy
// and reinvocation of mutations occurs on a per binding basis.
//
// +listType=atomic
// +optional
Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
// failurePolicy defines how to handle failures for the admission policy. Failures can
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
// or mis-configured policy definitions or bindings.
//
// A policy is invalid if paramKind refers to a non-existent Kind.
// A binding is invalid if paramRef.name refers to a non-existent resource.
//
// failurePolicy does not define how validations that evaluate to false are handled.
//
// Allowed values are Ignore or Fail. Defaults to Fail.
// +optional
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
// matchConditions is a list of conditions that must be met for a request to be validated.
// Match conditions filter requests that have already been matched by the matchConstraints.
// An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// If a parameter object is provided, it can be accessed via the `params` handle in the same
// manner as validation expressions.
//
// The exact matching logic is (in order):
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
// 3. If any matchCondition evaluates to an error (but none are FALSE):
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the policy is skipped
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
// as part of a single admission evaluation.
// Allowed values are "Never" and "IfNeeded".
//
// Never: These mutations will not be called more than once per binding in a single admission evaluation.
//
// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
// reinvoked when mutations change the object after this mutation is invoked.
// Required.
ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
}
// Mutation specifies the CEL expression which is used to apply the Mutation.
type Mutation struct {
// patchType indicates the patch strategy used.
// Allowed values are "ApplyConfiguration" and "JSONPatch".
// Required.
//
// +unionDiscriminator
PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
// applyConfiguration defines the desired configuration values of an object.
// The configuration is applied to the admission object using
// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
// A CEL expression is used to create apply configuration.
ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
// A CEL expression is used to create the JSON patch.
JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
}
// PatchType specifies the type of patch operation for a mutation.
// +enum
type PatchType string
const (
// ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
// JSONPatch indicates that the object is mutated through JSON Patch.
PatchTypeJSONPatch PatchType = "JSONPatch"
)
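// Editorial sketch (not part of the generated API file): the Mutation union is
// discriminated by patchType, so exactly one of ApplyConfiguration or
// JSONPatch is populated, and it must match the declared PatchType. The
// function name and expression below are illustrative only.
func exampleApplyConfigurationMutation() Mutation {
	return Mutation{
		PatchType: PatchTypeApplyConfiguration,
		ApplyConfiguration: &ApplyConfiguration{
			// The expression is CEL, evaluated by the API server to build an
			// apply configuration; this one forces a fixed serviceAccountName.
			Expression: `Object{
				spec: Object.spec{
					serviceAccountName: "example"
				}
			}`,
		},
	}
}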
// ApplyConfiguration defines the desired configuration values of an object.
type ApplyConfiguration struct {
// expression will be evaluated by CEL to create an apply configuration.
// ref: https://github.com/google/cel-spec
//
// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
// returns an apply configuration to set a single field:
//
// Object{
// spec: Object.spec{
// serviceAccountName: "example"
// }
// }
//
// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
// values not included in the apply configuration.
//
// CEL expressions have access to the object types needed to create apply configurations:
//
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
// object. No other metadata properties are accessible.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
}
// JSONPatch defines a JSON Patch.
type JSONPatch struct {
// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
// ref: https://github.com/google/cel-spec
//
// expression must return an array of JSONPatch values.
//
// For example, this CEL expression returns a JSON patch to conditionally modify a value:
//
// [
// JSONPatch{op: "test", path: "/spec/example", value: "Red"},
// JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
// ]
//
// To define an object for the patch value, use Object types. For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/spec/selector",
// value: Object.spec.selector{matchLabels: {"environment": "test"}}
// }
// ]
//
// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
// value: "test"
// },
// ]
//
// CEL expressions have access to the types needed to create JSON patches and objects:
//
// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
// See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
// integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
// [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
// function may be used to escape path keys containing '/' and '~'.
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
// as well as:
//
// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
//
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
}
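// Editorial sketch (not part of the generated API file): a JSONPatch mutation
// whose CEL expression adds a label, using jsonpatch.escapeKey for the '/' in
// the label key, mirroring the example in the field documentation above. The
// function name is illustrative only.
func exampleJSONPatchMutation() Mutation {
	return Mutation{
		PatchType: PatchTypeJSONPatch,
		JSONPatch: &JSONPatch{
			Expression: `[
				JSONPatch{
					op: "add",
					path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
					value: "test"
				}
			]`,
		},
	}
}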
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
// configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
//
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
type MutatingAdmissionPolicyBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of PolicyBinding.
Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingSpec struct {
// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
// If the referenced resource does not exist, this binding is considered invalid and will be ignored.
// Required.
PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
// paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
// matchResources limits what resources match this binding and may be mutated by it.
// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
// matchConditions before the resource may be mutated.
// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
// and matchConditions must match for the resource to be mutated.
// Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
// '*' matches CREATE, UPDATE and CONNECT.
// +optional
MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
}
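// Editorial sketch (not part of the generated API file): wiring a
// MutatingAdmissionPolicy to its binding. The object names, the empty match
// constraints and the mutation expression are illustrative assumptions only;
// a real policy must fill in matchConstraints.resourceRules.
func examplePolicyWithBinding() (MutatingAdmissionPolicy, MutatingAdmissionPolicyBinding) {
	failurePolicy := Fail
	policy := MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-set-service-account"},
		Spec: MutatingAdmissionPolicySpec{
			// matchConstraints is required; resource rules selecting the
			// objects this policy mutates would go here.
			MatchConstraints:   &MatchResources{},
			FailurePolicy:      &failurePolicy,
			ReinvocationPolicy: IfNeededReinvocationPolicy,
			Mutations: []Mutation{{
				PatchType: PatchTypeApplyConfiguration,
				ApplyConfiguration: &ApplyConfiguration{
					Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
				},
			}},
		},
	}
	binding := MutatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "example-set-service-account-binding"},
		Spec: MutatingAdmissionPolicyBindingSpec{
			// policyName must match the policy's metadata.name; matchResources
			// is left unset so only the policy's constraints apply.
			PolicyName: policy.Name,
		},
	}
	return policy, binding
}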

View File

@ -27,6 +27,15 @@ package v1beta1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_ApplyConfiguration = map[string]string{
"": "ApplyConfiguration defines the desired configuration values of an object.",
"expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
}
func (ApplyConfiguration) SwaggerDoc() map[string]string {
return map_ApplyConfiguration
}
var map_AuditAnnotation = map[string]string{
"": "AuditAnnotation describes how to produce an audit annotation for an API request.",
"key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
return map_ExpressionWarning
}
var map_JSONPatch = map[string]string{
"": "JSONPatch defines a JSON Patch.",
"expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
}
func (JSONPatch) SwaggerDoc() map[string]string {
return map_JSONPatch
}
var map_MatchCondition = map[string]string{
"": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
"name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string {
return map_MatchResources
}
var map_MutatingAdmissionPolicy = map[string]string{
"": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
"spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.",
}
func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicy
}
var map_MutatingAdmissionPolicyBinding = map[string]string{
"": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
"spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
}
func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicyBinding
}
var map_MutatingAdmissionPolicyBindingList = map[string]string{
"": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of PolicyBinding.",
}
func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicyBindingList
}
var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
"": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
"policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
"paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
"matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
}
func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicyBindingSpec
}
var map_MutatingAdmissionPolicyList = map[string]string{
"": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of ValidatingAdmissionPolicy.",
}
func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicyList
}
var map_MutatingAdmissionPolicySpec = map[string]string{
"": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
"paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
"matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
"variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
"mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
"failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
"matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
"reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
}
func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
return map_MutatingAdmissionPolicySpec
}
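The following is a hedged sketch (illustrative, not generated code) of wiring a MutatingAdmissionPolicySpec according to the field documentation above; matchConstraints and reinvocationPolicy are required in a real policy but elided here, and the mutation expression is an example only.

func exampleMutatingAdmissionPolicySpec() MutatingAdmissionPolicySpec {
	fail := FailurePolicyType("Fail")
	return MutatingAdmissionPolicySpec{
		// MatchConstraints and ReinvocationPolicy are required; see the
		// field documentation above. They are elided in this sketch.
		Mutations: []Mutation{{
			PatchType: PatchType("JSONPatch"),
			JSONPatch: &JSONPatch{
				Expression: `[JSONPatch{op: "add", path: "/metadata/labels/mutated", value: "true"}]`,
			},
		}},
		FailurePolicy: &fail,
	}
}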
var map_MutatingWebhook = map[string]string{
"": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_MutatingWebhookConfigurationList
}
var map_Mutation = map[string]string{
"": "Mutation specifies the CEL expression which is used to apply the Mutation.",
"patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
"applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
"jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
}
func (Mutation) SwaggerDoc() map[string]string {
return map_Mutation
}
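For contrast with the JSONPatch form, here is a hedged sketch of the ApplyConfiguration patch type described above; the CEL expression follows the documented Object type scheme and is illustrative only.

var exampleApplyConfigurationMutation = Mutation{
	PatchType: PatchType("ApplyConfiguration"),
	ApplyConfiguration: &ApplyConfiguration{
		// The expression evaluates to an apply configuration that is merged
		// into the admission object via structured merge diff.
		Expression: `Object{
			metadata: Object.metadata{
				labels: {"environment": "test"}
			}
		}`,
	},
}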
var map_NamedRuleWithOperations = map[string]string{
"": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",


@ -27,6 +27,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
if in == nil {
return nil
}
out := new(ApplyConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
*out = *in
@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
func (in *JSONPatch) DeepCopy() *JSONPatch {
if in == nil {
return nil
}
out := new(JSONPatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
*out = *in
if in.ParamRef != nil {
in, out := &in.ParamRef, &out.ParamRef
*out = new(ParamRef)
(*in).DeepCopyInto(*out)
}
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBindingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MutatingAdmissionPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
*out = *in
if in.ParamKind != nil {
in, out := &in.ParamKind, &out.ParamKind
*out = new(ParamKind)
**out = **in
}
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
if in.Variables != nil {
in, out := &in.Variables, &out.Variables
*out = make([]Variable, len(*in))
copy(*out, *in)
}
if in.Mutations != nil {
in, out := &in.Mutations, &out.Mutations
*out = make([]Mutation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FailurePolicy != nil {
in, out := &in.FailurePolicy, &out.FailurePolicy
*out = new(FailurePolicyType)
**out = **in
}
if in.MatchConditions != nil {
in, out := &in.MatchConditions, &out.MatchConditions
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
*out = *in
@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
}
if in.ReinvocationPolicy != nil {
in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
*out = new(ReinvocationPolicyType)
*out = new(admissionregistrationv1.ReinvocationPolicyType)
**out = **in
}
if in.MatchConditions != nil {
@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mutation) DeepCopyInto(out *Mutation) {
*out = *in
if in.ApplyConfiguration != nil {
in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
*out = new(ApplyConfiguration)
**out = **in
}
if in.JSONPatch != nil {
in, out := &in.JSONPatch, &out.JSONPatch
*out = new(JSONPatch)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
func (in *Mutation) DeepCopy() *Mutation {
if in == nil {
return nil
}
out := new(Mutation)
in.DeepCopyInto(out)
return out
}
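A brief hedged usage note (not generated code): because DeepCopyInto allocates fresh ApplyConfiguration and JSONPatch values, a copy can be modified without aliasing the original, as sketched below with illustrative values.

func exampleMutationDeepCopy() {
	orig := &Mutation{JSONPatch: &JSONPatch{Expression: `[]`}}
	cp := orig.DeepCopy()
	// cp.JSONPatch points at a new JSONPatch, so this does not touch orig.
	cp.JSONPatch.Expression = `[JSONPatch{op: "remove", path: "/metadata/labels/x"}]`
	_ = orig
}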
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
*out = *in


@ -25,6 +25,78 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
return 1, 40
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
return 1, 40
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
return 1, 40
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
return 1, 40
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {


@ -530,7 +530,7 @@ message RollingUpdateDaemonSet {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
// pod is immediatedly created on that node without considering surge limits.
// pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may

vendor/k8s.io/api/apps/v1/types.go

@ -635,7 +635,7 @@ type RollingUpdateDaemonSet struct {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
// pod is immediatedly created on that node without considering surge limits.
// pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may


@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}
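As a hedged illustration of the maxSurge/maxUnavailable semantics documented above (not generated code; assumes the apps/v1 package and the k8s.io/apimachinery/pkg/util/intstr import), a surge-based update strategy could be built like this:

func exampleDaemonSetSurgeStrategy() DaemonSetUpdateStrategy {
	surge := intstr.FromString("30%")
	unavailable := intstr.FromInt32(0)
	return DaemonSetUpdateStrategy{
		Type: RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &RollingUpdateDaemonSet{
			// Allow a 30% surge of updated pods; with maxUnavailable set to 0,
			// old pods are removed only once their replacements are available.
			MaxSurge:       &surge,
			MaxUnavailable: &unavailable,
		},
	}
}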
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {


@ -316,6 +316,9 @@ message Scale {
message ScaleSpec {
// replicas is the number of observed instances of the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
optional int32 replicas = 1;
}


@ -33,6 +33,9 @@ const (
type ScaleSpec struct {
// replicas is the number of observed instances of the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
@ -60,6 +63,7 @@ type ScaleStatus struct {
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
// +k8s:prerelease-lifecycle-gen:removed=1.16
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {


@ -536,7 +536,7 @@ message RollingUpdateDaemonSet {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
// pod is immediatedly created on that node without considering surge limits.
// pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
@ -614,6 +614,9 @@ message Scale {
message ScaleSpec {
// desired number of instances for the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
optional int32 replicas = 1;
}


@ -35,6 +35,9 @@ const (
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
@ -63,6 +66,7 @@ type ScaleStatus struct {
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
// +k8s:prerelease-lifecycle-gen:removed=1.16
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {
@ -681,7 +685,7 @@ type RollingUpdateDaemonSet struct {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
// pod is immediatedly created on that node without considering surge limits.
// pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may


@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {


@ -167,16 +167,10 @@ message ResourceAttributes {
optional string name = 7;
// fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.
//
// This field is alpha-level. To use this field, you must enable the
// `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
optional FieldSelectorAttributes fieldSelector = 8;
// labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.
//
// This field is alpha-level. To use this field, you must enable the
// `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
optional LabelSelectorAttributes labelSelector = 9;
}


@ -119,15 +119,9 @@ type ResourceAttributes struct {
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
// fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.
//
// This field is alpha-level. To use this field, you must enable the
// `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
// labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.
//
// This field is alpha-level. To use this field, you must enable the
// `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
}


@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{
"resource": "Resource is one of the existing resource types. \"*\" means all.",
"subresource": "Subresource is one of the existing resource types. \"\" means none.",
"name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
"fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
"labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
"fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.",
"labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.",
}
func (ResourceAttributes) SwaggerDoc() map[string]string {


@ -472,6 +472,9 @@ message Scale {
message ScaleSpec {
// replicas is the desired number of instances for the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
optional int32 replicas = 1;
}


@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.2
// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {
@ -138,6 +139,9 @@ type Scale struct {
type ScaleSpec struct {
// replicas is the desired number of instances for the scaled object.
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
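The +k8s:isSubresource=/scale tag above marks Scale as the payload of the /scale subresource of workload resources. As a hedged sketch (not generated code; assumes client-go with the context, metav1 and kubernetes imports, and an illustrative helper name), reading and rewriting a Deployment's scale looks like this:

func scaleTo(ctx context.Context, client kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := client.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// The returned object is this Scale type; only spec.replicas is written back.
	scale.Spec.Replicas = replicas
	_, err = client.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}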


@ -226,7 +226,8 @@ message JobSpec {
optional SuccessPolicy successPolicy = 16;
// Specifies the number of retries before marking this job failed.
// Defaults to 6
// Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
// When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
// +optional
optional int32 backoffLimit = 7;
@ -329,8 +330,6 @@ message JobSpec {
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
// This is on by default.
// +optional
optional string podReplacementPolicy = 14;
@ -570,7 +569,7 @@ message PodFailurePolicyRule {
message SuccessPolicy {
// rules represents the list of alternative rules for the declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.


@ -257,7 +257,7 @@ type PodFailurePolicy struct {
type SuccessPolicy struct {
// rules represents the list of alternative rules for the declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
@ -347,7 +347,8 @@ type JobSpec struct {
SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
// Specifies the number of retries before marking this job failed.
// Defaults to 6
// Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
// When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
// +optional
BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
@ -455,8 +456,6 @@ type JobSpec struct {
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
// This is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`


@ -117,7 +117,7 @@ var map_JobSpec = map[string]string{
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
"successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.",
"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
@ -126,7 +126,7 @@ var map_JobSpec = map[string]string{
"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
"managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
}
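As a hedged sketch of the backoffLimitPerIndex and successPolicy behavior documented above (not generated code; assumes the batch/v1 package and a corev1.PodTemplateSpec whose restart policy is Never), an Indexed Job spec might be assembled like this, leaving backoffLimit unset so it defaults to 2147483647 as described:

func exampleIndexedJobSpec(template corev1.PodTemplateSpec) JobSpec {
	mode := IndexedCompletion
	completions := int32(10)
	perIndexRetries := int32(2)
	succeededIndexes := "0-4"
	return JobSpec{
		CompletionMode:       &mode,
		Completions:          &completions,
		BackoffLimitPerIndex: &perIndexRetries,
		SuccessPolicy: &SuccessPolicy{
			// The Job gains the "SuccessCriteriaMet" condition once indexes
			// 0-4 have succeeded, regardless of the remaining indexes.
			Rules: []SuccessPolicyRule{{SucceededIndexes: &succeededIndexes}},
		},
		Template: template,
	}
}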
@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string {
var map_SuccessPolicy = map[string]string{
"": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
}
func (SuccessPolicy) SwaggerDoc() map[string]string {


@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1";
// This API can be used to request client certificates to authenticate to kube-apiserver
// (with the "kubernetes.io/kube-apiserver-client" signerName),
// or to obtain certificates from custom non-Kubernetes signers.
// +k8s:supportsSubresource=/status
// +k8s:supportsSubresource=/approval
message CertificateSigningRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@ -203,6 +205,11 @@ message CertificateSigningRequestStatus {
// +listType=map
// +listMapKey=type
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
repeated CertificateSigningRequestCondition conditions = 1;
// certificate is populated with an issued certificate by the signer after an Approved condition is present.


@ -39,6 +39,8 @@ import (
// This API can be used to request client certificates to authenticate to kube-apiserver
// (with the "kubernetes.io/kube-apiserver-client" signerName),
// or to obtain certificates from custom non-Kubernetes signers.
// +k8s:supportsSubresource=/status
// +k8s:supportsSubresource=/approval
type CertificateSigningRequest struct {
metav1.TypeMeta `json:",inline"`
// +optional
@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct {
// +listType=map
// +listMapKey=type
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
// certificate is populated with an issued certificate by the signer after an Approved condition is present.

File diff suppressed because it is too large


@ -101,3 +101,208 @@ message ClusterTrustBundleSpec {
optional string trustBundle = 2;
}
// PodCertificateRequest encodes a pod requesting a certificate from a given
// signer.
//
// Kubelets use this API to implement podCertificate projected volumes
message PodCertificateRequest {
// metadata contains the object metadata.
//
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec contains the details about the certificate being requested.
optional PodCertificateRequestSpec spec = 2;
// status contains the issued certificate, and a standard set of conditions.
// +optional
optional PodCertificateRequestStatus status = 3;
}
// PodCertificateRequestList is a collection of PodCertificateRequest objects
message PodCertificateRequestList {
// metadata contains the list metadata.
//
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// items is a collection of PodCertificateRequest objects
repeated PodCertificateRequest items = 2;
}
// PodCertificateRequestSpec describes the certificate request. All fields are
// immutable after creation.
message PodCertificateRequestSpec {
// signerName indicates the requested signer.
//
// All signer names beginning with `kubernetes.io` are reserved for use by
// the Kubernetes project. There is currently one well-known signer
// documented by the Kubernetes project,
// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
// certificates understood by kube-apiserver. It is currently
// unimplemented.
//
// +required
optional string signerName = 1;
// podName is the name of the pod into which the certificate will be mounted.
//
// +required
optional string podName = 2;
// podUID is the UID of the pod into which the certificate will be mounted.
//
// +required
optional string podUID = 3;
// serviceAccountName is the name of the service account the pod is running as.
//
// +required
optional string serviceAccountName = 4;
// serviceAccountUID is the UID of the service account the pod is running as.
//
// +required
optional string serviceAccountUID = 5;
// nodeName is the name of the node the pod is assigned to.
//
// +required
optional string nodeName = 6;
// nodeUID is the UID of the node the pod is assigned to.
//
// +required
optional string nodeUID = 7;
// maxExpirationSeconds is the maximum lifetime permitted for the
// certificate.
//
  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
// will reject values shorter than 3600 (1 hour). The maximum allowable
// value is 7862400 (91 days).
//
// The signer implementation is then free to issue a certificate with any
// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
// seconds (1 hour). This constraint is enforced by kube-apiserver.
// `kubernetes.io` signers will never issue certificates with a lifetime
// longer than 24 hours.
//
// +optional
// +default=86400
optional int32 maxExpirationSeconds = 8;
// pkixPublicKey is the PKIX-serialized public key the signer will issue the
// certificate to.
//
// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
// or ED25519. Note that this list may be expanded in the future.
//
// Signer implementations do not need to support all key types supported by
// kube-apiserver and kubelet. If a signer does not support the key type
// used for a given PodCertificateRequest, it must deny the request by
// setting a status.conditions entry with a type of "Denied" and a reason of
// "UnsupportedKeyType". It may also suggest a key type that it does support
// in the message field.
//
// +required
optional bytes pkixPublicKey = 9;
// proofOfPossession proves that the requesting kubelet holds the private
// key corresponding to pkixPublicKey.
//
  // It is constructed by signing the ASCII bytes of the pod's UID using
// `pkixPublicKey`.
//
// kube-apiserver validates the proof of possession during creation of the
// PodCertificateRequest.
//
// If the key is an RSA key, then the signature is over the ASCII bytes of
// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
// function crypto/rsa.SignPSS with nil options).
//
// If the key is an ECDSA key, then the signature is as described by [SEC 1,
// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
// golang library function crypto/ecdsa.SignASN1)
//
  // If the key is an ED25519 key, the signature is as described by the
// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
// the golang library crypto/ed25519.Sign).
//
// +required
optional bytes proofOfPossession = 10;
}
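The following Go sketch is a hedged illustration (not part of the generated API) of producing the pkixPublicKey and proofOfPossession values described above for an ED25519 key, using only the standard library crypto/ed25519, crypto/rand and crypto/x509 packages; the pod UID argument and function name are illustrative.

func examplePKIXAndProof(podUID string) (pkixPublicKey, proofOfPossession []byte, err error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	// pkixPublicKey carries the PKIX (DER) serialization of the public key.
	pkixPublicKey, err = x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return nil, nil, err
	}
	// For ED25519, the proof of possession is an Ed25519 signature over the
	// ASCII bytes of the pod UID.
	proofOfPossession = ed25519.Sign(priv, []byte(podUID))
	return pkixPublicKey, proofOfPossession, nil
}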
// PodCertificateRequestStatus describes the status of the request, and holds
// the certificate data if the request is issued.
message PodCertificateRequestStatus {
// conditions applied to the request.
//
// The types "Issued", "Denied", and "Failed" have special handling. At
// most one of these conditions may be present, and they must have status
// "True".
//
// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
// suggest a key type that will work in the message field.
//
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
// +optional
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
// certificateChain is populated with an issued certificate by the signer.
// This field is set via the /status subresource. Once populated, this field
// is immutable.
//
// If the certificate signing request is denied, a condition of type
// "Denied" is added and this field remains empty. If the signer cannot
// issue the certificate, a condition of type "Failed" is added and this
// field remains empty.
//
// Validation requirements:
// 1. certificateChain must consist of one or more PEM-formatted certificates.
// 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
// described in section 4 of RFC5280.
//
// If more than one block is present, and the definition of the requested
// spec.signerName does not indicate otherwise, the first block is the
// issued certificate, and subsequent blocks should be treated as
// intermediate certificates and presented in TLS handshakes. When
// projecting the chain into a pod volume, kubelet will drop any data
// in-between the PEM blocks, as well as any PEM block headers.
//
// +optional
optional string certificateChain = 2;
// notBefore is the time at which the certificate becomes valid. The value
// must be the same as the notBefore value in the leaf certificate in
// certificateChain. This field is set via the /status subresource. Once
// populated, it is immutable. The signer must set this field at the same
// time it sets certificateChain.
//
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
// beginRefreshAt is the time at which the kubelet should begin trying to
// refresh the certificate. This field is set via the /status subresource,
// and must be set at the same time as certificateChain. Once populated,
// this field is immutable.
//
// This field is only a hint. Kubelet may start refreshing before or after
// this time if necessary.
//
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
// notAfter is the time at which the certificate expires. The value must be
// the same as the notAfter value in the leaf certificate in
// certificateChain. This field is set via the /status subresource. Once
// populated, it is immutable. The signer must set this field at the same
// time it sets certificateChain.
//
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
}
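
Given the certificateChain ordering rules above, a consumer can split the chain into leaf and intermediates with a small amount of PEM decoding. A minimal sketch; the helper name is illustrative:

package podcertchain

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// splitChain decodes a status.certificateChain value, returning the first
// certificate as the leaf and any remaining certificates as intermediates.
func splitChain(chain string) (leaf *x509.Certificate, intermediates []*x509.Certificate, err error) {
	rest := []byte(chain)
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		cert, perr := x509.ParseCertificate(block.Bytes)
		if perr != nil {
			return nil, nil, perr
		}
		if leaf == nil {
			leaf = cert
		} else {
			intermediates = append(intermediates, cert)
		}
	}
	if leaf == nil {
		return nil, nil, errors.New("certificateChain contained no PEM certificate blocks")
	}
	return leaf, intermediates, nil
}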

View File

@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&ClusterTrustBundle{},
&ClusterTrustBundleList{},
&PodCertificateRequest{},
&PodCertificateRequestList{},
)
// Add the watch version that applies

View File

@ -18,6 +18,7 @@ package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// +genclient
@ -106,3 +107,233 @@ type ClusterTrustBundleList struct {
// items is a collection of ClusterTrustBundle objects
Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodCertificateRequest encodes a pod requesting a certificate from a given
// signer.
//
// Kubelets use this API to implement podCertificate projected volumes
type PodCertificateRequest struct {
metav1.TypeMeta `json:",inline"`
// metadata contains the object metadata.
//
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec contains the details about the certificate being requested.
Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// status contains the issued certificate, and a standard set of conditions.
// +optional
Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PodCertificateRequestSpec describes the certificate request. All fields are
// immutable after creation.
type PodCertificateRequestSpec struct {
// signerName indicates the requested signer.
//
// All signer names beginning with `kubernetes.io` are reserved for use by
// the Kubernetes project. There is currently one well-known signer
// documented by the Kubernetes project,
// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
// certificates understood by kube-apiserver. It is currently
// unimplemented.
//
// +required
SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
// podName is the name of the pod into which the certificate will be mounted.
//
// +required
PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
// podUID is the UID of the pod into which the certificate will be mounted.
//
// +required
PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
// serviceAccountName is the name of the service account the pod is running as.
//
// +required
ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
// serviceAccountUID is the UID of the service account the pod is running as.
//
// +required
ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
// nodeName is the name of the node the pod is assigned to.
//
// +required
NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
// nodeUID is the UID of the node the pod is assigned to.
//
// +required
NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
// maxExpirationSeconds is the maximum lifetime permitted for the
// certificate.
//
// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
// will reject values shorter than 3600 (1 hour). The maximum allowable
// value is 7862400 (91 days).
//
// The signer implementation is then free to issue a certificate with any
// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
// seconds (1 hour). This constraint is enforced by kube-apiserver.
// `kubernetes.io` signers will never issue certificates with a lifetime
// longer than 24 hours.
//
// +optional
// +default=86400
MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
// pkixPublicKey is the PKIX-serialized public key the signer will issue the
// certificate to.
//
// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
// or ED25519. Note that this list may be expanded in the future.
//
// Signer implementations do not need to support all key types supported by
// kube-apiserver and kubelet. If a signer does not support the key type
// used for a given PodCertificateRequest, it must deny the request by
// setting a status.conditions entry with a type of "Denied" and a reason of
// "UnsupportedKeyType". It may also suggest a key type that it does support
// in the message field.
//
// +required
PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
// proofOfPossession proves that the requesting kubelet holds the private
// key corresponding to pkixPublicKey.
//
// It is constructed by signing the ASCII bytes of the pod's UID with the
// private key corresponding to `pkixPublicKey`.
//
// kube-apiserver validates the proof of possession during creation of the
// PodCertificateRequest.
//
// If the key is an RSA key, then the signature is over the ASCII bytes of
// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
// function crypto/rsa.SignPSS with nil options).
//
// If the key is an ECDSA key, then the signature is as described by [SEC 1,
// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
// golang library function crypto/ecdsa.SignASN1).
//
// If the key is an ED25519 key, the signature is as described by the
// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
// the golang library crypto/ed25519.Sign).
//
// +required
ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
}
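
For orientation, a PodCertificateRequest could be assembled from these fields roughly as follows. This is a sketch only: the pod, service account, and node values are hypothetical, and the public key and proof-of-possession bytes are assumed to come from a keypair prepared as described for spec.proofOfPossession.

package requestexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
)

func newPodCertificateRequest(pkixPublicKey, proofOfPossession []byte) *certsv1alpha1.PodCertificateRequest {
	maxExp := int32(86400) // the default maximum lifetime: 24 hours
	return &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    "default",
			GenerateName: "my-pod-", // hypothetical
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:           "kubernetes.io/kube-apiserver-client-pod",
			PodName:              "my-pod", // hypothetical
			PodUID:               types.UID("4c3a2b1d-0000-4000-8000-000000000000"),
			ServiceAccountName:   "default",
			ServiceAccountUID:    types.UID("aaaaaaaa-0000-4000-8000-000000000000"),
			NodeName:             types.NodeName("node-1"),
			NodeUID:              types.UID("bbbbbbbb-0000-4000-8000-000000000000"),
			MaxExpirationSeconds: &maxExp,
			PKIXPublicKey:        pkixPublicKey,
			ProofOfPossession:    proofOfPossession,
		},
	}
}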
// PodCertificateRequestStatus describes the status of the request, and holds
// the certificate data if the request is issued.
type PodCertificateRequestStatus struct {
// conditions applied to the request.
//
// The types "Issued", "Denied", and "Failed" have special handling. At
// most one of these conditions may be present, and they must have status
// "True".
//
// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
// suggest a key type that will work in the message field.
//
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
// certificateChain is populated with an issued certificate by the signer.
// This field is set via the /status subresource. Once populated, this field
// is immutable.
//
// If the certificate signing request is denied, a condition of type
// "Denied" is added and this field remains empty. If the signer cannot
// issue the certificate, a condition of type "Failed" is added and this
// field remains empty.
//
// Validation requirements:
// 1. certificateChain must consist of one or more PEM-formatted certificates.
// 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
// described in section 4 of RFC5280.
//
// If more than one block is present, and the definition of the requested
// spec.signerName does not indicate otherwise, the first block is the
// issued certificate, and subsequent blocks should be treated as
// intermediate certificates and presented in TLS handshakes. When
// projecting the chain into a pod volume, kubelet will drop any data
// in-between the PEM blocks, as well as any PEM block headers.
//
// +optional
CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
// notBefore is the time at which the certificate becomes valid. The value
// must be the same as the notBefore value in the leaf certificate in
// certificateChain. This field is set via the /status subresource. Once
// populated, it is immutable. The signer must set this field at the same
// time it sets certificateChain.
//
// +optional
NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
// beginRefreshAt is the time at which the kubelet should begin trying to
// refresh the certificate. This field is set via the /status subresource,
// and must be set at the same time as certificateChain. Once populated,
// this field is immutable.
//
// This field is only a hint. Kubelet may start refreshing before or after
// this time if necessary.
//
// +optional
BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
// notAfter is the time at which the certificate expires. The value must be
// the same as the notAfter value in the leaf certificate in
// certificateChain. This field is set via the /status subresource. Once
// populated, it is immutable. The signer must set this field at the same
// time it sets certificateChain.
//
// +optional
NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
}
// Well-known condition types for PodCertificateRequests
const (
// Denied indicates the request was denied by the signer.
PodCertificateRequestConditionTypeDenied string = "Denied"
// Failed indicates the signer failed to issue the certificate.
PodCertificateRequestConditionTypeFailed string = "Failed"
// Issued indicates the certificate has been issued.
PodCertificateRequestConditionTypeIssued string = "Issued"
)
// Well-known condition reasons for PodCertificateRequests
const (
// UnsupportedKeyType should be set on "Denied" conditions when the signer
// doesn't support the key type of publicKey.
PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
)
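
As an illustration of the condition handling above, a signer that does not support the requested key type could deny the request along these lines and then write the object back via the /status subresource. The suggested key type in the message is only an example.

package signerexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
)

// denyUnsupportedKeyType appends a "Denied" condition with the
// "UnsupportedKeyType" reason to the request's status.
func denyUnsupportedKeyType(req *certsv1alpha1.PodCertificateRequest) {
	req.Status.Conditions = append(req.Status.Conditions, metav1.Condition{
		Type:               certsv1alpha1.PodCertificateRequestConditionTypeDenied,
		Status:             metav1.ConditionTrue,
		Reason:             certsv1alpha1.PodCertificateRequestConditionUnsupportedKeyType,
		Message:            "this signer only supports ED25519 keys",
		LastTransitionTime: metav1.Now(),
	})
}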
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
// PodCertificateRequestList is a collection of PodCertificateRequest objects
type PodCertificateRequestList struct {
metav1.TypeMeta `json:",inline"`
// metadata contains the list metadata.
//
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is a collection of PodCertificateRequest objects
Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
}

View File

@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
return map_ClusterTrustBundleSpec
}
var map_PodCertificateRequest = map[string]string{
"": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
"metadata": "metadata contains the object metadata.",
"spec": "spec contains the details about the certificate being requested.",
"status": "status contains the issued certificate, and a standard set of conditions.",
}
func (PodCertificateRequest) SwaggerDoc() map[string]string {
return map_PodCertificateRequest
}
var map_PodCertificateRequestList = map[string]string{
"": "PodCertificateRequestList is a collection of PodCertificateRequest objects",
"metadata": "metadata contains the list metadata.",
"items": "items is a collection of PodCertificateRequest objects",
}
func (PodCertificateRequestList) SwaggerDoc() map[string]string {
return map_PodCertificateRequestList
}
var map_PodCertificateRequestSpec = map[string]string{
"": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.",
"signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented.",
"podName": "podName is the name of the pod into which the certificate will be mounted.",
"podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
"serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
"serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
"nodeName": "nodeName is the name of the node the pod is assigned to.",
"nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
"pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
"proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
}
func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
return map_PodCertificateRequestSpec
}
var map_PodCertificateRequestStatus = map[string]string{
"": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
"conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
"certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
"notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
"beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.",
"notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
}
func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
return map_PodCertificateRequestStatus
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@ -22,6 +22,7 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
if in == nil {
return nil
}
out := new(PodCertificateRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodCertificateRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
if in == nil {
return nil
}
out := new(PodCertificateRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
*out = *in
if in.MaxExpirationSeconds != nil {
in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
*out = new(int32)
**out = **in
}
if in.PKIXPublicKey != nil {
in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.ProofOfPossession != nil {
in, out := &in.ProofOfPossession, &out.ProofOfPossession
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
if in == nil {
return nil
}
out := new(PodCertificateRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NotBefore != nil {
in, out := &in.NotBefore, &out.NotBefore
*out = (*in).DeepCopy()
}
if in.BeginRefreshAt != nil {
in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
*out = (*in).DeepCopy()
}
if in.NotAfter != nil {
in, out := &in.NotAfter, &out.NotAfter
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
if in == nil {
return nil
}
out := new(PodCertificateRequestStatus)
in.DeepCopyInto(out)
return out
}

View File

@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
return 1, 37
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
return 1, 40
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
return 1, 34
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
return 1, 37
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
return 1, 40
}

View File

@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
option go_package = "k8s.io/api/certificates/v1beta1";
// Describes a certificate signing request
// +k8s:supportsSubresource=/status
// +k8s:supportsSubresource=/approval
message CertificateSigningRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@ -182,6 +184,11 @@ message CertificateSigningRequestStatus {
// +listType=map
// +listMapKey=type
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
repeated CertificateSigningRequestCondition conditions = 1;
// If request was approved, the controller will place the issued certificate here.

View File

@ -31,6 +31,8 @@ import (
// +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest
// Describes a certificate signing request
// +k8s:supportsSubresource=/status
// +k8s:supportsSubresource=/approval
type CertificateSigningRequest struct {
metav1.TypeMeta `json:",inline"`
// +optional
@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct {
// +listType=map
// +listMapKey=type
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
// If request was approved, the controller will place the issued certificate here.

File diff suppressed because it is too large Load Diff

View File

@ -737,8 +737,8 @@ message Container {
repeated ContainerPort ports = 6;
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// The keys defined within a source may consist of any printable ASCII characters except '='.
// When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@ -768,10 +768,10 @@ message Container {
repeated ContainerResizePolicy resizePolicy = 23;
// RestartPolicy defines the restart behavior of individual containers in a pod.
// This field may only be set for init containers, and the only allowed value is "Always".
// For non-init containers or when this field is not specified,
// This overrides the pod-level restart policy. When this field is not specified,
// the restart behavior is defined by the Pod's restart policy and the container type.
// Setting the RestartPolicy as "Always" for the init container will have the following effect:
// Additionally, setting the RestartPolicy as "Always" for the init container will
// have the following effect:
// this init container will be continually restarted on
// exit until all regular containers have terminated. Once all regular
// containers have completed, all init containers with restartPolicy "Always"
@ -786,6 +786,22 @@ message Container {
// +optional
optional string restartPolicy = 24;
// Represents a list of rules to be checked to determine if the
// container should be restarted on exit. The rules are evaluated in
// order. Once a rule matches a container exit condition, the remaining
// rules are ignored. If no rule matches the container exit condition,
// the Container-level restart policy determines whether the container
// is restarted. Constraints on the rules:
// - At most 20 rules are allowed.
// - Multiple rules may specify the same action.
// - Identical rules are not forbidden by validation.
// When rules are specified, the container MUST set RestartPolicy explicitly,
// even if it matches the Pod's RestartPolicy.
// +featureGate=ContainerRestartRules
// +optional
// +listType=atomic
repeated ContainerRestartRule restartPolicyRules = 25;
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
@ -888,6 +904,19 @@ message Container {
optional bool tty = 18;
}
// ContainerExtendedResourceRequest has the mapping of container name,
// extended resource name to the device request name.
message ContainerExtendedResourceRequest {
// The name of the container requesting resources.
optional string containerName = 1;
// The name of the extended resource in that container which gets backed by DRA.
optional string resourceName = 2;
// The name of the request in the special ResourceClaim which corresponds to the extended resource.
optional string requestName = 3;
}
// Describe a container image
message ContainerImage {
// Names by which this image is known.
@ -942,6 +971,39 @@ message ContainerResizePolicy {
optional string restartPolicy = 2;
}
// ContainerRestartRule describes how a container exit is handled.
message ContainerRestartRule {
// Specifies the action taken on a container exit if the requirements
// are satisfied. The only possible value is "Restart" to restart the
// container.
// +required
optional string action = 1;
// Represents the exit codes to check on container exits.
// +optional
// +oneOf=when
optional ContainerRestartRuleOnExitCodes exitCodes = 2;
}
// ContainerRestartRuleOnExitCodes describes the condition
// for handling an exited container based on its exit codes.
message ContainerRestartRuleOnExitCodes {
// Represents the relationship between the container exit code(s) and the
// specified values. Possible values are:
// - In: the requirement is satisfied if the container exit code is in the
// set of specified values.
// - NotIn: the requirement is satisfied if the container exit code is
// not in the set of specified values.
// +required
optional string operator = 1;
// Specifies the set of values to check for container exit codes.
// At most 255 elements are allowed.
// +optional
// +listType=set
repeated int32 values = 2;
}
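
The evaluation semantics described for restartPolicyRules and the exit-code operators can be captured in a few lines of Go. The types below are illustrative stand-ins, not the generated core/v1 structs:

package restartrules

// rule is an illustrative stand-in for a container restart rule: an action
// ("Restart") guarded by an exit-code requirement.
type rule struct {
	Action   string  // only "Restart" is allowed
	Operator string  // "In" or "NotIn"
	Values   []int32 // the exit codes to check, at most 255 values
}

// shouldRestart evaluates rules in order; the first rule whose requirement is
// satisfied decides. If no rule matches, the container-level restart policy
// (not modeled here) determines whether the container is restarted.
func shouldRestart(exitCode int32, rules []rule) (restart, matched bool) {
	for _, r := range rules {
		inSet := false
		for _, v := range r.Values {
			if v == exitCode {
				inSet = true
				break
			}
		}
		satisfied := (r.Operator == "In" && inSet) || (r.Operator == "NotIn" && !inSet)
		if satisfied {
			return r.Action == "Restart", true
		}
	}
	return false, false
}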
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
@ -1344,7 +1406,8 @@ message EndpointsList {
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
message EnvFromSource {
// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// Optional text to prepend to the name of each environment variable.
// May consist of any printable ASCII characters except '='.
// +optional
optional string prefix = 1;
@ -1359,7 +1422,8 @@ message EnvFromSource {
// EnvVar represents an environment variable present in a Container.
message EnvVar {
// Name of the environment variable. Must be a C_IDENTIFIER.
// Name of the environment variable.
// May consist of any printable ASCII characters except '='.
optional string name = 1;
// Variable references $(VAR_NAME) are expanded
@ -1398,6 +1462,13 @@ message EnvVarSource {
// Selects a key of a secret in the pod's namespace
// +optional
optional SecretKeySelector secretKeyRef = 4;
// FileKeyRef selects a key of the env file.
// Requires the EnvFiles feature gate to be enabled.
//
// +featureGate=EnvFiles
// +optional
optional FileKeySelector fileKeyRef = 5;
}
// An EphemeralContainer is a temporary container that you may add to an existing Pod for
@ -1479,8 +1550,8 @@ message EphemeralContainerCommon {
repeated ContainerPort ports = 6;
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// The keys defined within a source may consist of any printable ASCII characters except '='.
// When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@ -1510,12 +1581,19 @@ message EphemeralContainerCommon {
// Restart policy for the container to manage the restart behavior of each
// container within a pod.
// This may only be set for init containers. You cannot set this field on
// ephemeral containers.
// You cannot set this field on ephemeral containers.
// +featureGate=SidecarContainers
// +optional
optional string restartPolicy = 24;
// Represents a list of rules to be checked to determine if the
// container should be restarted on exit. You cannot set this field on
// ephemeral containers.
// +featureGate=ContainerRestartRules
// +optional
// +listType=atomic
repeated ContainerRestartRule restartPolicyRules = 25;
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// Cannot be updated.
// +optional
@ -1776,6 +1854,36 @@ message FCVolumeSource {
repeated string wwids = 5;
}
// FileKeySelector selects a key of the env file.
// +structType=atomic
message FileKeySelector {
// The name of the volume mount containing the env file.
// +required
optional string volumeName = 1;
// The path within the volume from which to select the file.
// Must be relative and may not contain the '..' path or start with '..'.
// +required
optional string path = 2;
// The key within the env file. An invalid key will prevent the pod from starting.
// The keys defined within a source may consist of any printable ASCII characters except '='.
// During the Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
// +required
optional string key = 3;
// Specify whether the file or its key must be defined.
//
// If optional is set to true and the specified key does not exist,
// the environment variable will not be set in the Pod's containers.
//
// If optional is set to false and the specified key does not exist,
// an error will be returned during Pod creation.
// +optional
// +default=false
optional bool optional = 4;
}
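
To make the fileKeyRef behavior concrete, here is a rough sketch of resolving such a selector. It assumes the env file uses KEY=VALUE lines and that volumeMountPath is where the named volume is mounted in the container filesystem; both are assumptions made for illustration, not a statement of kubelet's implementation:

package envfiles

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// lookupFileKey resolves a fileKeyRef-style selector against a mounted volume.
func lookupFileKey(volumeMountPath, relPath, key string, optional bool) (value string, found bool, err error) {
	f, err := os.Open(filepath.Join(volumeMountPath, relPath))
	if err != nil {
		if optional {
			return "", false, nil // the env var is simply not set
		}
		return "", false, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		name, val, ok := strings.Cut(scanner.Text(), "=")
		if ok && name == key {
			return val, true, nil
		}
	}
	if err := scanner.Err(); err != nil {
		return "", false, err
	}
	if optional {
		return "", false, nil
	}
	return "", false, fmt.Errorf("key %q not found in %s", key, relPath)
}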
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
message FlexPersistentVolumeSource {
@ -1949,7 +2057,6 @@ message GlusterfsPersistentVolumeSource {
// Glusterfs volumes do not support ownership management or SELinux relabeling.
message GlusterfsVolumeSource {
// endpoints is the endpoint name that details Glusterfs topology.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
optional string endpoints = 1;
// path is the Glusterfs volume path.
@ -3160,15 +3267,13 @@ message PersistentVolumeClaimSpec {
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
// will be set by the persistentvolume controller if it exists.
// it can be changed after the claim is created. An empty string or nil value indicates that no
// VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
// this field can be reset to its previous value (including nil) to cancel the modification.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 9;
@ -3267,14 +3372,12 @@ message PersistentVolumeClaimStatus {
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string currentVolumeAttributesClassName = 8;
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional ModifyVolumeStatus modifyVolumeStatus = 9;
@ -3515,7 +3618,6 @@ message PersistentVolumeSpec {
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 10;
@ -3684,8 +3786,8 @@ message PodAntiAffinity {
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
// compute a sum by iterating through the elements of this field and subtracting
// "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
// +listType=atomic
@ -3725,6 +3827,79 @@ message PodAttachOptions {
optional string container = 5;
}
// PodCertificateProjection provides a private key and X.509 certificate in the
// pod filesystem.
message PodCertificateProjection {
// Kubelet's generated CSRs will be addressed to this signer.
//
// +required
optional string signerName = 1;
// The type of keypair Kubelet will generate for the pod.
//
// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
// "ECDSAP521", and "ED25519".
//
// +required
optional string keyType = 2;
// maxExpirationSeconds is the maximum lifetime permitted for the
// certificate.
//
// Kubelet copies this value verbatim into the PodCertificateRequests it
// generates for this projection.
//
// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
// will reject values shorter than 3600 (1 hour). The maximum allowable
// value is 7862400 (91 days).
//
// The signer implementation is then free to issue a certificate with any
// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
// seconds (1 hour). This constraint is enforced by kube-apiserver.
// `kubernetes.io` signers will never issue certificates with a lifetime
// longer than 24 hours.
//
// +optional
optional int32 maxExpirationSeconds = 3;
// Write the credential bundle at this path in the projected volume.
//
// The credential bundle is a single file that contains multiple PEM blocks.
// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
// key.
//
// The remaining blocks are CERTIFICATE blocks, containing the issued
// certificate chain from the signer (leaf and any intermediates).
//
// Using credentialBundlePath lets your Pod's application code make a single
// atomic read that retrieves a consistent key and certificate chain. If you
// project them to separate files, your application code will need to
// additionally check that the leaf certificate was issued to the key.
//
// +optional
optional string credentialBundlePath = 4;
// Write the key at this path in the projected volume.
//
// Most applications should use credentialBundlePath. When using keyPath
// and certificateChainPath, your application needs to check that the key
// and leaf certificate are consistent, because it is possible to read the
// files mid-rotation.
//
// +optional
optional string keyPath = 5;
// Write the certificate chain at this path in the projected volume.
//
// Most applications should use credentialBundlePath. When using keyPath
// and certificateChainPath, your application needs to check that the key
// and leaf certificate are consistent, because it is possible to read the
// files mid-rotation.
//
// +optional
optional string certificateChainPath = 6;
}
// PodCondition contains details for the current condition of this pod.
message PodCondition {
// Type is the type of the condition.
@ -3829,6 +4004,20 @@ message PodExecOptions {
repeated string command = 6;
}
// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
// resource requests backed by DRA. It stores the generated name for
// the corresponding special ResourceClaim created by the scheduler.
message PodExtendedResourceClaimStatus {
// RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request
// in the generated ResourceClaim.
// +listType=atomic
repeated ContainerExtendedResourceRequest requestMappings = 1;
// ResourceClaimName is the name of the ResourceClaim that was
// generated for the Pod in the namespace of the Pod.
optional string resourceClaimName = 2;
}
// PodIP represents a single IP address allocated to the pod.
message PodIP {
// IP is the IP address assigned to the pod
@ -4269,7 +4458,9 @@ message PodSpec {
optional string nodeName = 10;
// Host networking requested for this pod. Use the host's network namespace.
// If this option is set, the ports that will be used must be specified.
// When using HostNetwork you should specify ports so the scheduler is aware.
// When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
// and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
// Default to false.
// +k8s:conversion-gen=false
// +optional
@ -4434,6 +4625,7 @@ message PodSpec {
// - spec.hostPID
// - spec.hostIPC
// - spec.hostUsers
// - spec.resources
// - spec.securityContext.appArmorProfile
// - spec.securityContext.seLinuxOptions
// - spec.securityContext.seccompProfile
@ -4504,7 +4696,7 @@ message PodSpec {
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
// "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
@ -4516,6 +4708,21 @@ message PodSpec {
// +featureGate=PodLevelResources
// +optional
optional ResourceRequirements resources = 40;
// HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
// This field only specifies the pod's hostname and does not affect its DNS records.
// When this field is set to a non-empty string:
// - It takes precedence over the values set in `hostname` and `subdomain`.
// - The Pod's hostname will be set to this value.
// - `setHostnameAsFQDN` must be nil or set to false.
// - `hostNetwork` must be set to false.
//
// This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
// Requires the HostnameOverride feature gate to be enabled.
//
// +featureGate=HostnameOverride
// +optional
optional string hostnameOverride = 41;
}
// PodStatus represents information about the status of a pod. Status may trail the actual
@ -4674,6 +4881,11 @@ message PodStatus {
// +featureGate=DynamicResourceAllocation
// +optional
repeated PodResourceClaimStatus resourceClaimStatuses = 15;
// Status of extended resource claim backed by DRA.
// +featureGate=DRAExtendedResource
// +optional
optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18;
}
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
@ -5298,7 +5510,7 @@ message ResourceRequirements {
// Claims lists the names of resources, defined in spec.resourceClaims,
// that are used by this container.
//
// This is an alpha field and requires enabling the
// This field depends on the
// DynamicResourceAllocation feature gate.
//
// This field is immutable. It can only be set for containers.
@ -6301,7 +6513,6 @@ message Taint {
optional string effect = 3;
// TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints.
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
}
@ -6682,6 +6893,44 @@ message VolumeProjection {
// +featureGate=ClusterTrustBundleProjection
// +optional
optional ClusterTrustBundleProjection clusterTrustBundle = 5;
// Projects an auto-rotating credential bundle (private key and certificate
// chain) that the pod can use either as a TLS client or server.
//
// Kubelet generates a private key and uses it to send a
// PodCertificateRequest to the named signer. Once the signer approves the
// request and issues a certificate chain, Kubelet writes the key and
// certificate chain to the pod filesystem. The pod does not start until
// certificates have been issued for each podCertificate projected volume
// source in its spec.
//
// Kubelet will begin trying to rotate the certificate at the time indicated
// by the signer using the PodCertificateRequest.Status.BeginRefreshAt
// timestamp.
//
// Kubelet can write a single file, indicated by the credentialBundlePath
// field, or separate files, indicated by the keyPath and
// certificateChainPath fields.
//
// The credential bundle is a single file in PEM format. The first PEM
// entry is the private key (in PKCS#8 format), and the remaining PEM
// entries are the certificate chain issued by the signer (typically,
// signers will return their certificate chain in leaf-to-root order).
//
// Prefer using the credential bundle format, since your application code
// can read it atomically. If you use keyPath and certificateChainPath,
// your application must make two separate file reads. If these coincide
// with a certificate rotation, it is possible that the private key and leaf
// certificate you read may not correspond to each other. Your application
// will need to check for this condition, and re-read until they are
// consistent.
//
// The named signer chooses the format of the certificate it
// issues; consult the signer implementation's documentation to learn how to
// use the certificates it issues.
//
// +featureGate=PodCertificateProjection
// +optional
optional PodCertificateProjection podCertificate = 6;
}
// VolumeResourceRequirements describes the storage resource requirements for a volume.
@ -6753,13 +7002,12 @@ message VolumeSource {
// iscsi represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://examples.k8s.io/volumes/iscsi/README.md
// More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
// +optional
optional ISCSIVolumeSource iscsi = 8;
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsVolumeSource glusterfs = 9;
@ -6771,7 +7019,6 @@ message VolumeSource {
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDVolumeSource rbd = 11;

320
vendor/k8s.io/api/core/v1/types.go generated vendored
View File

@ -91,12 +91,11 @@ type VolumeSource struct {
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
// iscsi represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://examples.k8s.io/volumes/iscsi/README.md
// More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
// persistentVolumeClaimVolumeSource represents a reference to a
@ -106,7 +105,6 @@ type VolumeSource struct {
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// flexVolume represents a generic volume resource that is
@ -437,7 +435,6 @@ type PersistentVolumeSpec struct {
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct {
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
// will be set by the persistentvolume controller if it exists.
// it can be changed after the claim is created. An empty string or nil value indicates that no
// VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
// this field can be reset to its previous value (including nil) to cancel the modification.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct {
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct {
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
// endpoints is the endpoint name that details Glusterfs topology.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// path is the Glusterfs volume path.
@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct {
Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
}
// PodCertificateProjection provides a private key and X.509 certificate in the
// pod filesystem.
type PodCertificateProjection struct {
// Kubelet's generated CSRs will be addressed to this signer.
//
// +required
SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"`
// The type of keypair Kubelet will generate for the pod.
//
// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
// "ECDSAP521", and "ED25519".
//
// +required
KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"`
// maxExpirationSeconds is the maximum lifetime permitted for the
// certificate.
//
// Kubelet copies this value verbatim into the PodCertificateRequests it
// generates for this projection.
//
// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
// will reject values shorter than 3600 (1 hour). The maximum allowable
// value is 7862400 (91 days).
//
// The signer implementation is then free to issue a certificate with any
// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
// seconds (1 hour). This constraint is enforced by kube-apiserver.
// `kubernetes.io` signers will never issue certificates with a lifetime
// longer than 24 hours.
//
// +optional
MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"`
// Write the credential bundle at this path in the projected volume.
//
// The credential bundle is a single file that contains multiple PEM blocks.
// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
// key.
//
// The remaining blocks are CERTIFICATE blocks, containing the issued
// certificate chain from the signer (leaf and any intermediates).
//
// Using credentialBundlePath lets your Pod's application code make a single
// atomic read that retrieves a consistent key and certificate chain. If you
// project them to separate files, your application code will need to
// additionally check that the leaf certificate was issued to the key.
//
// +optional
CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"`
// Write the key at this path in the projected volume.
//
// Most applications should use credentialBundlePath. When using keyPath
// and certificateChainPath, your application needs to check that the key
// and leaf certificate are consistent, because it is possible to read the
// files mid-rotation.
//
// +optional
KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"`
// Write the certificate chain at this path in the projected volume.
//
// Most applications should use credentialBundlePath. When using keyPath
// and certificateChainPath, your application needs to check that the key
// and leaf certificate are consistent, because it is possible to read the
// files mid-rotation.
//
// +optional
CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
}
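// Illustrative sketch, not part of the generated API types: one way application
// code might consume the credential bundle written at credentialBundlePath.
// The mount path is an assumption for the example; the parsing order (a PKCS#8
// PRIVATE KEY block first, CERTIFICATE blocks after) follows the field
// documentation above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// A single read of the whole bundle keeps the key and chain consistent,
	// even if Kubelet rotates the credentials between reads.
	data, err := os.ReadFile("/var/run/pod-certs/credentials.pem") // assumed mount path
	if err != nil {
		panic(err)
	}

	keyBlock, rest := pem.Decode(data)
	if keyBlock == nil || keyBlock.Type != "PRIVATE KEY" {
		panic("expected a PKCS#8 PRIVATE KEY block first")
	}
	key, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
	if err != nil {
		panic(err)
	}

	var chain []*x509.Certificate
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			panic(err)
		}
		chain = append(chain, cert)
	}
	fmt.Printf("parsed %T key and %d certificates\n", key, len(chain))
}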
// Represents a projected volume source
type ProjectedVolumeSource struct {
// sources is the list of volume projections. Each entry in this list
@ -2043,6 +2108,44 @@ type VolumeProjection struct {
// +featureGate=ClusterTrustBundleProjection
// +optional
ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
// Projects an auto-rotating credential bundle (private key and certificate
// chain) that the pod can use either as a TLS client or server.
//
// Kubelet generates a private key and uses it to send a
// PodCertificateRequest to the named signer. Once the signer approves the
// request and issues a certificate chain, Kubelet writes the key and
// certificate chain to the pod filesystem. The pod does not start until
// certificates have been issued for each podCertificate projected volume
// source in its spec.
//
// Kubelet will begin trying to rotate the certificate at the time indicated
// by the signer using the PodCertificateRequest.Status.BeginRefreshAt
// timestamp.
//
// Kubelet can write a single file, indicated by the credentialBundlePath
// field, or separate files, indicated by the keyPath and
// certificateChainPath fields.
//
// The credential bundle is a single file in PEM format. The first PEM
// entry is the private key (in PKCS#8 format), and the remaining PEM
// entries are the certificate chain issued by the signer (typically,
// signers will return their certificate chain in leaf-to-root order).
//
// Prefer using the credential bundle format, since your application code
// can read it atomically. If you use keyPath and certificateChainPath,
// your application must make two separate file reads. If these coincide
// with a certificate rotation, it is possible that the private key and leaf
// certificate you read may not correspond to each other. Your application
// will need to check for this condition, and re-read until they are
// consistent.
//
// The named signer controls the format of the certificate it
// issues; consult the signer implementation's documentation to learn how to
// use the certificates it issues.
//
// +featureGate=PodCertificateProjection
// +optional
PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
}
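// Illustrative sketch, not part of the upstream diff: wiring the new
// podCertificate projection into a projected volume with the generated Go
// types. The signer name, key type, and paths are assumptions for the example;
// the field is gated by PodCertificateProjection per the documentation above.
package example

import corev1 "k8s.io/api/core/v1"

func podCertificateVolume() corev1.Volume {
	maxExpiration := int32(86400) // 24 hours, the default applied by kube-apiserver
	return corev1.Volume{
		Name: "pod-certs",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/tls-signer", // assumed signer
						KeyType:              "ECDSAP256",
						MaxExpirationSeconds: &maxExpiration,
						// Single-file bundle so the application can read the key
						// and chain in one atomic read.
						CredentialBundlePath: "credentials.pem",
					},
				}},
			},
		},
	}
}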
const (
@ -2351,7 +2454,8 @@ type VolumeDevice struct {
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Name of the environment variable. Must be a C_IDENTIFIER.
// Name of the environment variable.
// May consist of any printable ASCII characters except '='.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Optional: no more than one of the following may be specified.
@ -2388,6 +2492,39 @@ type EnvVarSource struct {
// Selects a key of a secret in the pod's namespace
// +optional
SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
// FileKeyRef selects a key of the env file.
// Requires the EnvFiles feature gate to be enabled.
//
// +featureGate=EnvFiles
// +optional
FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"`
}
// FileKeySelector selects a key of the env file.
// +structType=atomic
type FileKeySelector struct {
// The name of the volume mount containing the env file.
// +required
VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"`
// The path within the volume from which to select the file.
// Must be relative and may not contain the '..' path or start with '..'.
// +required
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// The key within the env file. An invalid key will prevent the pod from starting.
// The keys defined within a source may consist of any printable ASCII characters except '='.
// During the Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
// +required
Key string `json:"key" protobuf:"bytes,3,opt,name=key"`
// Specify whether the file or its key must be defined. If the file or key
// does not exist, then the env var is not published.
// If optional is set to true and the specified key does not exist,
// the environment variable will not be set in the Pod's containers.
//
// If optional is set to false and the specified key does not exist,
// an error will be returned during Pod creation.
// +optional
// +default=false
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
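// Illustrative sketch, not part of the upstream diff: populating a single
// environment variable from an env file via the new fileKeyRef source. The
// volume name, path, and key are assumptions for the example; the source
// requires the EnvFiles feature gate per the documentation above.
package example

import corev1 "k8s.io/api/core/v1"

func envVarFromFileKey() corev1.EnvVar {
	optional := false
	return corev1.EnvVar{
		Name: "DATABASE_PASSWORD", // any printable ASCII character except '='
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config-volume",
				Path:       "config.env", // relative, must not contain ".."
				Key:        "DB_PASSWORD",
				Optional:   &optional, // pod creation fails if the key is missing
			},
		},
	}
}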
// ObjectFieldSelector selects an APIVersioned field of an object.
@ -2439,7 +2576,8 @@ type SecretKeySelector struct {
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
type EnvFromSource struct {
// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// Optional text to prepend to the name of each environment variable.
// May consist of any printable ASCII characters except '='.
// +optional
Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
// The ConfigMap to select from
@ -2697,7 +2835,7 @@ type ResourceRequirements struct {
// Claims lists the names of resources, defined in spec.resourceClaims,
// that are used by this container.
//
// This is an alpha field and requires enabling the
// This field depends on the
// DynamicResourceAllocation feature gate.
//
// This field is immutable. It can only be set for containers.
@ -2805,8 +2943,8 @@ type Container struct {
// +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// The keys defined within a source may consist of any printable ASCII characters except '='.
// When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@ -2832,10 +2970,10 @@ type Container struct {
// +listType=atomic
ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
// RestartPolicy defines the restart behavior of individual containers in a pod.
// This field may only be set for init containers, and the only allowed value is "Always".
// For non-init containers or when this field is not specified,
// This overrides the pod-level restart policy. When this field is not specified,
// the restart behavior is defined by the Pod's restart policy and the container type.
// Setting the RestartPolicy as "Always" for the init container will have the following effect:
// Additionally, setting the RestartPolicy as "Always" for the init container will
// have the following effect:
// this init container will be continually restarted on
// exit until all regular containers have terminated. Once all regular
// containers have completed, all init containers with restartPolicy "Always"
@ -2849,6 +2987,21 @@ type Container struct {
// +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Represents a list of rules to be checked to determine if the
// container should be restarted on exit. The rules are evaluated in
// order. Once a rule matches a container exit condition, the remaining
// rules are ignored. If no rule matches the container exit condition,
// the Container-level restart policy determines whether the container
// is restarted or not. Constraints on the rules:
// - At most 20 rules are allowed.
// - Rules can have the same action.
// - Identical rules are not forbidden in validations.
// When rules are specified, container MUST set RestartPolicy explicitly
// even if it matches the Pod's RestartPolicy.
// +featureGate=ContainerRestartRules
// +optional
// +listType=atomic
RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
@ -3478,11 +3631,64 @@ const (
)
// ContainerRestartPolicy is the restart policy for a single container.
// This may only be set for init containers and only allowed value is "Always".
// The only allowed values are "Always", "Never", and "OnFailure".
type ContainerRestartPolicy string
const (
ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
ContainerRestartPolicyNever ContainerRestartPolicy = "Never"
ContainerRestartPolicyOnFailure ContainerRestartPolicy = "OnFailure"
)
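// Illustrative sketch, not part of the upstream diff: the "sidecar" pattern
// described in the Container.RestartPolicy documentation above, where an init
// container with restartPolicy Always keeps running alongside the regular
// containers. Image names are assumptions; the behavior is gated by
// SidecarContainers.
package example

import corev1 "k8s.io/api/core/v1"

func sidecarPodSpec() corev1.PodSpec {
	always := corev1.ContainerRestartPolicyAlways
	return corev1.PodSpec{
		InitContainers: []corev1.Container{{
			Name:          "log-shipper", // restarted on exit until all regular containers terminate
			Image:         "example.com/log-shipper:latest",
			RestartPolicy: &always,
		}},
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "example.com/app:latest",
		}},
	}
}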
// ContainerRestartRule describes how a container exit is handled.
type ContainerRestartRule struct {
// Specifies the action taken on a container exit if the requirements
// are satisfied. The only possible value is "Restart" to restart the
// container.
// +required
Action ContainerRestartRuleAction `json:"action,omitempty" proto:"bytes,1,opt,name=action" protobuf:"bytes,1,opt,name=action,casttype=ContainerRestartRuleAction"`
// Represents the exit codes to check on container exits.
// +optional
// +oneOf=when
ExitCodes *ContainerRestartRuleOnExitCodes `json:"exitCodes,omitempty" proto:"bytes,2,opt,name=exitCodes" protobuf:"bytes,2,opt,name=exitCodes"`
}
// ContainerRestartRuleAction describes the action to take when the
// container exits.
type ContainerRestartRuleAction string
// The only valid action is Restart.
const (
ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
)
// ContainerRestartRuleOnExitCodes describes the condition
// for handling an exited container based on its exit codes.
type ContainerRestartRuleOnExitCodes struct {
// Represents the relationship between the container exit code(s) and the
// specified values. Possible values are:
// - In: the requirement is satisfied if the container exit code is in the
// set of specified values.
// - NotIn: the requirement is satisfied if the container exit code is
// not in the set of specified values.
// +required
Operator ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty" proto:"bytes,1,opt,name=operator" protobuf:"bytes,1,opt,name=operator,casttype=ContainerRestartRuleOnExitCodesOperator"`
// Specifies the set of values to check for container exit codes.
// At most 255 elements are allowed.
// +optional
// +listType=set
Values []int32 `json:"values,omitempty" proto:"varint,2,rep,name=values" protobuf:"varint,2,rep,name=values"`
}
// ContainerRestartRuleOnExitCodesOperator describes the operator
// to take for the exit codes.
type ContainerRestartRuleOnExitCodesOperator string
const (
ContainerRestartRuleOnExitCodesOpIn ContainerRestartRuleOnExitCodesOperator = "In"
ContainerRestartRuleOnExitCodesOpNotIn ContainerRestartRuleOnExitCodesOperator = "NotIn"
)
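// Illustrative sketch, not part of the upstream diff: a container-level restart
// rule that restarts the container only when it exits with code 42 (an assumed,
// application-specific code). Per the constraints above, RestartPolicy must be
// set explicitly whenever rules are specified; the field is gated by
// ContainerRestartRules.
package example

import corev1 "k8s.io/api/core/v1"

func restartOnExitCode42() corev1.Container {
	never := corev1.ContainerRestartPolicyNever
	return corev1.Container{
		Name:          "worker",
		Image:         "example.com/worker:latest", // assumed image
		RestartPolicy: &never,                      // required because rules are present
		RestartPolicyRules: []corev1.ContainerRestartRule{{
			Action: corev1.ContainerRestartRuleActionRestart,
			ExitCodes: &corev1.ContainerRestartRuleOnExitCodes{
				Operator: corev1.ContainerRestartRuleOnExitCodesOpIn,
				Values:   []int32{42},
			},
		}},
	}
}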
// DNSPolicy defines how a pod's DNS will be configured.
@ -3678,8 +3884,8 @@ type PodAntiAffinity struct {
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
// compute a sum by iterating through the elements of this field and subtracting
// "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
// +listType=atomic
@ -3806,7 +4012,6 @@ type Taint struct {
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
// TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints.
// +optional
TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
@ -3983,7 +4188,9 @@ type PodSpec struct {
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
// Host networking requested for this pod. Use the host's network namespace.
// If this option is set, the ports that will be used must be specified.
// When using HostNetwork you should specify ports so the scheduler is aware.
// When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
// and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
// Default to false.
// +k8s:conversion-gen=false
// +optional
@ -4126,6 +4333,7 @@ type PodSpec struct {
// - spec.hostPID
// - spec.hostIPC
// - spec.hostUsers
// - spec.resources
// - spec.securityContext.appArmorProfile
// - spec.securityContext.seLinuxOptions
// - spec.securityContext.seccompProfile
@ -4194,7 +4402,7 @@ type PodSpec struct {
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
// "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
@ -4206,6 +4414,20 @@ type PodSpec struct {
// +featureGate=PodLevelResources
// +optional
Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
// HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
// This field only specifies the pod's hostname and does not affect its DNS records.
// When this field is set to a non-empty string:
// - It takes precedence over the values set in `hostname` and `subdomain`.
// - The Pod's hostname will be set to this value.
// - `setHostnameAsFQDN` must be nil or set to false.
// - `hostNetwork` must be set to false.
//
// This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
// Requires the HostnameOverride feature gate to be enabled.
//
// +featureGate=HostnameOverride
// +optional
HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"`
}
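// Illustrative sketch, not part of the upstream diff: setting the new pod-level
// resources and hostnameOverride fields documented above. The hostname,
// quantities, and image are assumptions; both fields are feature-gated
// (PodLevelResources, HostnameOverride).
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func podLevelSpec() corev1.PodSpec {
	hostname := "worker-a.internal.example" // valid RFC 1123 subdomain, at most 64 characters
	return corev1.PodSpec{
		HostnameOverride: &hostname, // requires hostNetwork=false and setHostnameAsFQDN unset or false
		Resources: &corev1.ResourceRequirements{
			// Pod-level resources accept only "cpu", "memory" and "hugepages-" names.
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "example.com/app:latest",
		}},
	}
}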
// PodResourceClaim references exactly one ResourceClaim, either directly
@ -4267,6 +4489,31 @@ type PodResourceClaimStatus struct {
ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"`
}
// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
// resource requests backed by DRA. It stores the generated name for
// the corresponding special ResourceClaim created by the scheduler.
type PodExtendedResourceClaimStatus struct {
// RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request
// in the generated ResourceClaim.
// +listType=atomic
RequestMappings []ContainerExtendedResourceRequest `json:"requestMappings" protobuf:"bytes,1,rep,name=requestMappings"`
// ResourceClaimName is the name of the ResourceClaim that was
// generated for the Pod in the namespace of the Pod.
ResourceClaimName string `json:"resourceClaimName" protobuf:"bytes,2,name=resourceClaimName"`
}
// ContainerExtendedResourceRequest has the mapping of container name,
// extended resource name to the device request name.
type ContainerExtendedResourceRequest struct {
// The name of the container requesting resources.
ContainerName string `json:"containerName" protobuf:"bytes,1,name=containerName"`
// The name of the extended resource in that container which gets backed by DRA.
ResourceName string `json:"resourceName" protobuf:"bytes,2,name=resourceName"`
// The name of the request in the special ResourceClaim which corresponds to the extended resource.
RequestName string `json:"requestName" protobuf:"bytes,3,name=requestName"`
}
// OSName is the set of OS'es that can be used in OS.
type OSName string
@ -4799,8 +5046,8 @@ type EphemeralContainerCommon struct {
// +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// The keys defined within a source may consist of any printable ASCII characters except '='.
// When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@ -4826,11 +5073,17 @@ type EphemeralContainerCommon struct {
ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
// Restart policy for the container to manage the restart behavior of each
// container within a pod.
// This may only be set for init containers. You cannot set this field on
// ephemeral containers.
// You cannot set this field on ephemeral containers.
// +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Represents a list of rules to be checked to determine if the
// container should be restarted on exit. You cannot set this field on
// ephemeral containers.
// +featureGate=ContainerRestartRules
// +optional
// +listType=atomic
RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// Cannot be updated.
// +optional
@ -5091,6 +5344,10 @@ type PodStatus struct {
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"`
// Status of extended resource claim backed by DRA.
// +featureGate=DRAExtendedResource
// +optional
ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -5311,6 +5568,7 @@ type ReplicationControllerCondition struct {
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.0
// +k8s:supportsSubresource=/scale
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {

View File

@ -356,11 +356,12 @@ var map_Container = map[string]string{
"args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
"resizePolicy": "Resources resize policy for the container.",
"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
"restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
@ -380,6 +381,17 @@ func (Container) SwaggerDoc() map[string]string {
return map_Container
}
var map_ContainerExtendedResourceRequest = map[string]string{
"": "ContainerExtendedResourceRequest has the mapping of container name, extended resource name to the device request name.",
"containerName": "The name of the container requesting resources.",
"resourceName": "The name of the extended resource in that container which gets backed by DRA.",
"requestName": "The name of the request in the special ResourceClaim which corresponds to the extended resource.",
}
func (ContainerExtendedResourceRequest) SwaggerDoc() map[string]string {
return map_ContainerExtendedResourceRequest
}
var map_ContainerImage = map[string]string{
"": "Describe a container image",
"names": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]",
@ -413,6 +425,26 @@ func (ContainerResizePolicy) SwaggerDoc() map[string]string {
return map_ContainerResizePolicy
}
var map_ContainerRestartRule = map[string]string{
"": "ContainerRestartRule describes how a container exit is handled.",
"action": "Specifies the action taken on a container exit if the requirements are satisfied. The only possible value is \"Restart\" to restart the container.",
"exitCodes": "Represents the exit codes to check on container exits.",
}
func (ContainerRestartRule) SwaggerDoc() map[string]string {
return map_ContainerRestartRule
}
var map_ContainerRestartRuleOnExitCodes = map[string]string{
"": "ContainerRestartRuleOnExitCodes describes the condition for handling an exited container based on its exit codes.",
"operator": "Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the\n set of specified values.\n- NotIn: the requirement is satisfied if the container exit code is\n not in the set of specified values.",
"values": "Specifies the set of values to check for container exit codes. At most 255 elements are allowed.",
}
func (ContainerRestartRuleOnExitCodes) SwaggerDoc() map[string]string {
return map_ContainerRestartRuleOnExitCodes
}
var map_ContainerState = map[string]string{
"": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
"waiting": "Details about a waiting container",
@ -597,7 +629,7 @@ func (EndpointsList) SwaggerDoc() map[string]string {
var map_EnvFromSource = map[string]string{
"": "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
"prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.",
"prefix": "Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='.",
"configMapRef": "The ConfigMap to select from",
"secretRef": "The Secret to select from",
}
@ -608,7 +640,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string {
var map_EnvVar = map[string]string{
"": "EnvVar represents an environment variable present in a Container.",
"name": "Name of the environment variable. Must be a C_IDENTIFIER.",
"name": "Name of the environment variable. May consist of any printable ASCII characters except '='.",
"value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
}
@ -623,6 +655,7 @@ var map_EnvVarSource = map[string]string{
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
"configMapKeyRef": "Selects a key of a ConfigMap.",
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
"fileKeyRef": "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.",
}
func (EnvVarSource) SwaggerDoc() map[string]string {
@ -646,11 +679,12 @@ var map_EphemeralContainerCommon = map[string]string{
"args": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "Ports are not allowed for ephemeral containers.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
"resizePolicy": "Resources resize policy for the container.",
"restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.",
"restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. You cannot set this field on ephemeral containers.",
"restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. You cannot set this field on ephemeral containers.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Probes are not allowed for ephemeral containers.",
@ -754,6 +788,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string {
return map_FCVolumeSource
}
var map_FileKeySelector = map[string]string{
"": "FileKeySelector selects a key of the env file.",
"volumeName": "The name of the volume mount containing the env file.",
"path": "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.",
"key": "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.",
"optional": "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.",
}
func (FileKeySelector) SwaggerDoc() map[string]string {
return map_FileKeySelector
}
var map_FlexPersistentVolumeSource = map[string]string{
"": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
"driver": "driver is the name of the driver to use for this volume.",
@ -837,7 +883,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
var map_GlusterfsVolumeSource = map[string]string{
"": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
"endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
"endpoints": "endpoints is the endpoint name that details Glusterfs topology.",
"path": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
"readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
}
@ -1446,7 +1492,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/",
}
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@ -1461,8 +1507,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim",
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
}
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
@ -1539,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.",
}
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
@ -1606,7 +1652,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string {
var map_PodAntiAffinity = map[string]string{
"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
"requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
}
func (PodAntiAffinity) SwaggerDoc() map[string]string {
@ -1626,6 +1672,20 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
return map_PodAttachOptions
}
var map_PodCertificateProjection = map[string]string{
"": "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.",
"signerName": "Kubelet's generated CSRs will be addressed to this signer.",
"keyType": "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".",
"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
"credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
"keyPath": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
"certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
}
func (PodCertificateProjection) SwaggerDoc() map[string]string {
return map_PodCertificateProjection
}
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
@ -1676,6 +1736,16 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
return map_PodExecOptions
}
var map_PodExtendedResourceClaimStatus = map[string]string{
"": "PodExtendedResourceClaimStatus is stored in the PodStatus for the extended resource requests backed by DRA. It stores the generated name for the corresponding special ResourceClaim created by the scheduler.",
"requestMappings": "RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request in the generated ResourceClaim.",
"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod.",
}
func (PodExtendedResourceClaimStatus) SwaggerDoc() map[string]string {
return map_PodExtendedResourceClaimStatus
}
var map_PodIP = map[string]string{
"": "PodIP represents a single IP address allocated to the pod.",
"ip": "IP is the IP address assigned to the pod",
@ -1824,7 +1894,7 @@ var map_PodSpec = map[string]string{
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
"nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
@ -1846,11 +1916,12 @@ var map_PodSpec = map[string]string{
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
"hostnameOverride": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
}
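The pod-level resources and hostnameOverride entries above describe new alpha PodSpec surface (PodLevelResources and HostnameOverride feature gates). The following is a minimal, hedged Go sketch of a PodSpec that exercises both; the image name, hostname and quantities are hypothetical, while the Resources and HostnameOverride field names are the ones copied by the deepcopy hunks later in this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical hostname; must be a valid RFC 1123 subdomain of at most 64
	// characters, and requires hostNetwork=false with setHostnameAsFQDN unset or false.
	hostname := "db-0"

	spec := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example/app:latest", // hypothetical image
		}},
		// Pod-level budget shared by all containers; only cpu, memory and
		// hugepages-* resource names are accepted here (PodLevelResources, alpha).
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
		// Explicit hostname override (HostnameOverride, alpha).
		HostnameOverride: &hostname,
	}

	cpu := spec.Resources.Requests[corev1.ResourceCPU]
	fmt.Println(*spec.HostnameOverride, cpu.String())
}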
func (PodSpec) SwaggerDoc() map[string]string {
@ -1858,24 +1929,25 @@ func (PodSpec) SwaggerDoc() map[string]string {
}
var map_PodStatus = map[string]string{
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
"observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
"nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
"hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
"hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
"podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
"initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
"resourceClaimStatuses": "Status of resource claims.",
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
"observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
"nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
"hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
"hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
"podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
"initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
"resourceClaimStatuses": "Status of resource claims.",
"extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.",
}
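Since status.resize is deprecated in favor of the PodResizePending and PodResizeInProgress conditions named in its description, a hedged consumer-side sketch of reading resize progress from conditions, plus a nil-check of the new extendedResourceClaimStatus pointer, might look like this (condition type strings are taken from the deprecation note above; the pod literal is purely illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// resizeState reports which resize condition, if any, is currently true.
func resizeState(pod *corev1.Pod) string {
	for _, c := range pod.Status.Conditions {
		switch string(c.Type) {
		case "PodResizePending", "PodResizeInProgress":
			if c.Status == corev1.ConditionTrue {
				return string(c.Type)
			}
		}
	}
	return "none"
}

func main() {
	pod := &corev1.Pod{Status: corev1.PodStatus{
		Conditions: []corev1.PodCondition{{Type: "PodResizePending", Status: corev1.ConditionTrue}},
	}}
	fmt.Println(resizeState(pod)) // PodResizePending

	// extendedResourceClaimStatus is a pointer and stays nil unless DRA backs an
	// extended resource request for this pod.
	if pod.Status.ExtendedResourceClaimStatus != nil {
		fmt.Println(len(pod.Status.ExtendedResourceClaimStatus.RequestMappings), "extended resource mappings")
	}
}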
func (PodStatus) SwaggerDoc() map[string]string {
@ -2205,7 +2277,7 @@ var map_ResourceRequirements = map[string]string{
"": "ResourceRequirements describes the compute resource requirements.",
"limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
"claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
"claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
}
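The claims entry above ties a container to a pod-level resourceClaims declaration by name. A small hedged sketch, with hypothetical claim and image names, under the DynamicResourceAllocation feature gate:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	spec := corev1.PodSpec{
		// The pod declares the claim once...
		ResourceClaims: []corev1.PodResourceClaim{{Name: "gpu"}},
		Containers: []corev1.Container{{
			Name:  "trainer",
			Image: "registry.example/trainer:latest", // hypothetical image
			Resources: corev1.ResourceRequirements{
				// ...and the container references it; the name must match
				// an entry in spec.resourceClaims.
				Claims: []corev1.ResourceClaim{{Name: "gpu"}},
			},
		}},
	}
	fmt.Println(spec.Containers[0].Resources.Claims[0].Name)
}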
func (ResourceRequirements) SwaggerDoc() map[string]string {
@ -2587,7 +2659,7 @@ var map_Taint = map[string]string{
"key": "Required. The taint key to be applied to a node.",
"value": "The taint value corresponding to the taint key.",
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
"timeAdded": "TimeAdded represents the time at which the taint was added.",
}
func (Taint) SwaggerDoc() map[string]string {
@ -2727,6 +2799,7 @@ var map_VolumeProjection = map[string]string{
"configMap": "configMap information about the configMap data to project",
"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
"clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
"podCertificate": "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer. Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem. The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format. The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically. If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other. Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer controls chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
}
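The podCertificate projection described above can be sketched as follows. Only MaxExpirationSeconds is confirmed by the deepcopy hunk later in this diff; SignerName and CredentialBundlePath are assumed Go spellings inferred from the JSON field names in the description, and the signer itself is hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	maxExp := int32(86400) // requested lifetime in seconds; the signer may issue less

	vol := corev1.Volume{
		Name: "pod-cert",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/internal-ca", // hypothetical signer
						CredentialBundlePath: "credbundle.pem",          // assumed field: single PEM file, key first then chain
						MaxExpirationSeconds: &maxExp,
					},
				}},
			},
		},
	}
	fmt.Println(vol.Name)
}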
func (VolumeProjection) SwaggerDoc() map[string]string {
@ -2752,10 +2825,10 @@ var map_VolumeSource = map[string]string{
"gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
"glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi",
"glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.",
"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.",
"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",

View File

@ -829,6 +829,13 @@ func (in *Container) DeepCopyInto(out *Container) {
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.RestartPolicyRules != nil {
in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
*out = make([]ContainerRestartRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -879,6 +886,22 @@ func (in *Container) DeepCopy() *Container {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest.
func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest {
if in == nil {
return nil
}
out := new(ContainerExtendedResourceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
*out = *in
@ -932,6 +955,48 @@ func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) {
*out = *in
if in.ExitCodes != nil {
in, out := &in.ExitCodes, &out.ExitCodes
*out = new(ContainerRestartRuleOnExitCodes)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule.
func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule {
if in == nil {
return nil
}
out := new(ContainerRestartRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]int32, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes.
func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes {
if in == nil {
return nil
}
out := new(ContainerRestartRuleOnExitCodes)
in.DeepCopyInto(out)
return out
}
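As with all generated deep-copy helpers, the restart-rule types above get freshly allocated pointer and slice fields. A small usage sketch (the exit-code values are arbitrary):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	orig := &corev1.ContainerRestartRuleOnExitCodes{Values: []int32{1, 2, 42}}

	cp := orig.DeepCopy() // allocates a new Values slice
	cp.Values[0] = 99     // mutating the copy leaves the original untouched

	fmt.Println(orig.Values[0], cp.Values[0]) // 1 99
}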
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerState) DeepCopyInto(out *ContainerState) {
*out = *in
@ -1433,6 +1498,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
*out = new(SecretKeySelector)
(*in).DeepCopyInto(*out)
}
if in.FileKeyRef != nil {
in, out := &in.FileKeyRef, &out.FileKeyRef
*out = new(FileKeySelector)
(*in).DeepCopyInto(*out)
}
return
}
@ -1506,6 +1576,13 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.RestartPolicyRules != nil {
in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
*out = make([]ContainerRestartRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -1736,6 +1813,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) {
*out = *in
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector.
func (in *FileKeySelector) DeepCopy() *FileKeySelector {
if in == nil {
return nil
}
out := new(FileKeySelector)
in.DeepCopyInto(out)
return out
}
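FileKeySelector backs the new fileKeyRef env var source. In the hedged sketch below only the FileKeyRef and Optional field names are confirmed by the deepcopy hunks in this diff; VolumeName, Path and Key are assumed Go spellings of the usual JSON names, and all values are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := false

	env := corev1.EnvVar{
		Name: "DB_PASSWORD", // hypothetical variable name
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",         // assumed field: volume holding the env file
				Path:       "app/config.env", // assumed field: file path within that volume
				Key:        "db_password",    // assumed field: key inside the env file
				Optional:   &optional,
			},
		},
	}
	fmt.Println(env.Name)
}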
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
*out = *in
@ -3797,6 +3895,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) {
*out = *in
if in.MaxExpirationSeconds != nil {
in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection.
func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection {
if in == nil {
return nil
}
out := new(PodCertificateProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCondition) DeepCopyInto(out *PodCondition) {
*out = *in
@ -3899,6 +4018,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) {
*out = *in
if in.RequestMappings != nil {
in, out := &in.RequestMappings, &out.RequestMappings
*out = make([]ContainerExtendedResourceRequest, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus.
func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus {
if in == nil {
return nil
}
out := new(PodExtendedResourceClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodIP) DeepCopyInto(out *PodIP) {
*out = *in
@ -4412,6 +4552,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.HostnameOverride != nil {
in, out := &in.HostnameOverride, &out.HostnameOverride
*out = new(string)
**out = **in
}
return
}
@ -4477,6 +4622,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtendedResourceClaimStatus != nil {
in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus
*out = new(PodExtendedResourceClaimStatus)
(*in).DeepCopyInto(*out)
}
return
}
@ -6412,6 +6562,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = new(ClusterTrustBundleProjection)
(*in).DeepCopyInto(*out)
}
if in.PodCertificate != nil {
in, out := &in.PodCertificate, &out.PodCertificate
*out = new(PodCertificateProjection)
(*in).DeepCopyInto(*out)
}
return
}

View File

@ -18,5 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +k8s:validation-gen=TypeMeta
// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1
package v1beta1

Some files were not shown because too many files have changed in this diff.