Mirror of https://github.com/knative/client.git

commit dfd65db9ab (parent a7ea77d106)

bump k8s packages to 1.17.6 (#881)

* add script to bump k8s deps
* bump k8s to v1.17.6

go.mod (18 changed lines)
@@ -1,5 +1,7 @@
 module knative.dev/client
 
+go 1.14
+
 require (
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/spf13/cobra v0.0.6
@@ -9,7 +11,7 @@ require (
 	gotest.tools v2.2.0+incompatible
 	k8s.io/api v0.17.6
 	k8s.io/apimachinery v0.17.6
-	k8s.io/cli-runtime v0.17.3
+	k8s.io/cli-runtime v0.17.6
 	k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
 	knative.dev/eventing v0.15.1-0.20200608083719-c024353a712c
 	knative.dev/pkg v0.0.0-20200606224418-7ed1d4a552bc
@@ -24,12 +26,10 @@ require (
 // See https://github.com/spf13/cobra/pull/899
 replace (
 	github.com/spf13/cobra => github.com/chmouel/cobra v0.0.0-20191021105835-a78788917390
-	k8s.io/api => k8s.io/api v0.16.4
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.16.4
-	k8s.io/apimachinery => k8s.io/apimachinery v0.16.4
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.16.4
-	k8s.io/client-go => k8s.io/client-go v0.16.4
-	k8s.io/code-generator => k8s.io/code-generator v0.16.4
+	k8s.io/api => k8s.io/api v0.17.6
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.6
+	k8s.io/apimachinery => k8s.io/apimachinery v0.17.6
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.6
+	k8s.io/client-go => k8s.io/client-go v0.17.6
+	k8s.io/code-generator => k8s.io/code-generator v0.17.6
 )
-
-go 1.14
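
Note that the k8s.io modules appear both in the require list and in the replace block above; during module resolution the replace directives win, so bumping only the require entries would have had no effect. That is why both this diff and the new helper script added later in the commit update the require and replace entries together.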

go.sum (53 changed lines)
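go.sum records two checksums per module version: an h1: hash of the module's contents and a hash of its go.mod file. Directly required modules therefore usually change in pairs of lines below, while modules consulted only for dependency resolution may carry just the go.mod hash.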
@ -125,11 +125,13 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
|
|||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
|
||||
github.com/andygrunwald/go-gerrit v0.0.0-20190120104749-174420ebee6c/go.mod h1:0iuRQp6WJ44ts+iihy5E/WlPqfg5RNeQxOmzRkxCdtk=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
|
|
@ -209,9 +211,7 @@ github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH
|
|||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
|
|
@ -299,7 +299,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
|||
github.com/fsouza/fake-gcs-server v0.0.0-20180612165233-e85be23bdaa8/go.mod h1:1/HufuJ+eaDf4KTnYdS6HJMGvMRU8d4cYTuu/1QaBbI=
|
||||
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
|
|
@ -322,6 +321,7 @@ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70t
|
|||
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
|
||||
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
|
||||
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
|
||||
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
|
||||
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
|
||||
|
|
@ -341,8 +341,10 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
|
|||
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
|
||||
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
|
||||
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
|
||||
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
|
||||
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
|
|
@ -354,6 +356,7 @@ github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHK
|
|||
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
|
||||
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
|
|
@ -363,6 +366,7 @@ github.com/go-openapi/swag v0.19.7 h1:VRuXN2EnMSsZdauzdss6JBC29YotDqG59BZ+tdlIL1
|
|||
github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
|
||||
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
|
||||
github.com/go-sql-driver/mysql v0.0.0-20160411075031-7ebe0a500653/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
|
|
@ -485,15 +489,12 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY
|
|||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=
|
||||
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
|
|
@ -594,6 +595,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
|||
github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190918110929-3d9be26a50eb/go.mod h1:KWRxWvzVCNvDtG9ejU5UdpgvxdCZFMUZu0xroKWG8Bo=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
|
|
@ -816,7 +818,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
|
|||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
|
|
@ -858,6 +859,7 @@ github.com/tektoncd/pipeline v0.8.0/go.mod h1:IZzJdiX9EqEMuUcgdnElozdYYRh0/ZRC+N
|
|||
github.com/tektoncd/pipeline v0.10.1/go.mod h1:D2X0exT46zYx95BU7ByM8+erpjoN7thmUBvlKThOszU=
|
||||
github.com/tektoncd/plumbing v0.0.0-20191216083742-847dcf196de9/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y=
|
||||
github.com/tektoncd/plumbing/pipelinerun-logs v0.0.0-20191206114338-712d544c2c21/go.mod h1:S62EUWtqmejjJgUMOGB1CCCHRp6C706laH06BoALkzU=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tsenart/go-tsz v0.0.0-20180814235614-0bd30b3df1c3/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
|
||||
|
|
@ -873,6 +875,7 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
|
|||
github.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44238/go.mod h1:JwQJCMWpUDqjZrB5jpw0f5VbN7U95zxFy1ZDpoEarGo=
|
||||
github.com/vdemeester/k8s-pkg-credentialprovider v1.13.12-1/go.mod h1:Fko0rTxEtDW2kju5Ky7yFJNS3IcNvW8IPsp4/e9oev0=
|
||||
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4=
|
||||
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
|
||||
|
|
@ -896,6 +899,9 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
|||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20181031231232-83304cfc808c/go.mod h1:weASp41xM3dk0YHg1s/W8ecdGP5G4teSTMBPpYAaUgA=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
|
|
@ -907,18 +913,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/otel v0.2.3/go.mod h1:OgNpQOjrlt33Ew6Ds0mGjmcTQg/rhUctsbkRdk/g1fw=
|
||||
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.9.2-0.20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
|
|
@ -939,6 +942,7 @@ golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaE
|
|||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
|
@ -1012,7 +1016,6 @@ golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
|
@ -1118,6 +1121,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
|||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
|
|
@ -1130,6 +1134,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
|
|||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
|
|
@ -1273,7 +1278,6 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
|||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
|
|
@ -1324,23 +1328,21 @@ istio.io/api v0.0.0-20191115173247-e1a1952e5b81/go.mod h1:+cyHH83OwC0rFpwk8eXctz
|
|||
istio.io/client-go v0.0.0-20191120150049-26c62a04cdbc/go.mod h1:Bn3fm/aQ7JXVHiMYB4TrSp+NnUlZ5A5sH20xwzureII=
|
||||
istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a/go.mod h1:OzpAts7jljZceG4Vqi5/zXy/pOg1b209T3jb7Nv5wIs=
|
||||
istio.io/gogo-genproto v0.0.0-20191029161641-f7d19ec0141d/go.mod h1:OzpAts7jljZceG4Vqi5/zXy/pOg1b209T3jb7Nv5wIs=
|
||||
k8s.io/api v0.16.4 h1:O06Ed/hgLiCrzW1SHp6HAhqcTnYHtK80bP5rXoHakpM=
|
||||
k8s.io/api v0.16.4/go.mod h1:AtzMnsR45tccQss5q8RnF+W8L81DH6XwXwo/joEx9u0=
|
||||
k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto=
|
||||
k8s.io/apimachinery v0.16.4 h1:+VNiyTcctUvBBRUxfulwL2I6TGratkR1oAoULuas/HI=
|
||||
k8s.io/apimachinery v0.16.4/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ=
|
||||
k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc=
|
||||
k8s.io/api v0.17.6 h1:S6qZSkjdOU0N/TYBZKoR1o7YVSiWMGFU0XXDoqs2ioA=
|
||||
k8s.io/api v0.17.6/go.mod h1:1jKVwkj0UZ4huak/yRt3MFfU5wc32+B41SkNN5HhyFg=
|
||||
k8s.io/apiextensions-apiserver v0.17.6/go.mod h1:Z3CHLP3Tha+Rbav7JR3S+ye427UaJkHBomK2c4XtZ3A=
|
||||
k8s.io/apimachinery v0.17.6 h1:P0MNfucrmKLPsOSRbhDuG0Tplrpg7hVY4fJHh5sUIUw=
|
||||
k8s.io/apimachinery v0.17.6/go.mod h1:Lg8zZ5iC/O8UjCqW6DNhcQG2m4TdjF9kwG3891OWbbA=
|
||||
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
|
||||
k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
|
||||
k8s.io/apiserver v0.17.6/go.mod h1:sAYqm8hUDNA9aj/TzqwsJoExWrxprKv0tqs/z88qym0=
|
||||
k8s.io/cli-runtime v0.16.4 h1:ckLPbEshqrWW0Y0Rykdam5zCaaUdGF5Ss+5WM5pk+nA=
|
||||
k8s.io/cli-runtime v0.16.4/go.mod h1:eMzMINgUDp0722/80GDTQrKJ18S4H48iNzaIt5jy/IU=
|
||||
k8s.io/client-go v0.16.4 h1:sf+FEZXYhJNjpTZapQDLvvN+0kBeUTxCYxlXcVdhv2E=
|
||||
k8s.io/client-go v0.16.4/go.mod h1:ZgxhFDxSnoKY0J0U2/Y1C8obKDdlhGPZwA7oHH863Ok=
|
||||
k8s.io/cli-runtime v0.17.6 h1:JOlW8O0wPwpoQUl6ZDGtRZ5Fw6KQ1XvDxev0V8B22/M=
|
||||
k8s.io/cli-runtime v0.17.6/go.mod h1:CHzZyoNkNFdB3Ot+KcDD26Hd1GCsc84RenZmr8DKnVs=
|
||||
k8s.io/client-go v0.17.6 h1:W/JkbAcIZUPb9vENRTC75ymjQQO3qEJAZyYhOIEOifM=
|
||||
k8s.io/client-go v0.17.6/go.mod h1:tX5eAbQR/Kbqv+5R93rzHQoyRnPjjW2mm9i0lXnW218=
|
||||
k8s.io/cloud-provider v0.17.0/go.mod h1:Ze4c3w2C0bRsjkBUoHpFi+qWe3ob1wI2/7cUn+YQIDE=
|
||||
k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U=
|
||||
k8s.io/code-generator v0.16.4/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY=
|
||||
k8s.io/component-base v0.16.4/go.mod h1:GYQ+4hlkEwdlpAp59Ztc4gYuFhdoZqiAJD1unYDJ3FM=
|
||||
k8s.io/code-generator v0.17.6/go.mod h1:iiHz51+oTx+Z9D0vB3CH3O4HDDPWrvZyUgUYaIE9h9M=
|
||||
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
|
||||
k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
|
||||
k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
|
||||
|
|
@ -1355,7 +1357,6 @@ k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
|
|||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
|
|
@ -1377,7 +1378,6 @@ k8s.io/test-infra v0.0.0-20191212060232-70b0b49fe247/go.mod h1:d8SKryJBXAwfCFVL4
|
|||
k8s.io/test-infra v0.0.0-20200407001919-bc7f71ef65b8/go.mod h1:/WpJWcaDvuykB322WXP4kJbX8IpalOzuPxA62GpwkJk=
|
||||
k8s.io/utils v0.0.0-20181019225348-5e321f9a457c/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
|
||||
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20190907131718-3d4f5b7dea0b/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20200124190032-861946025e34 h1:HjlUD6M0K3P8nRXmr2B9o4F9dUy9TCj/aEpReeyi6+k=
|
||||
|
|
@ -1427,7 +1427,6 @@ sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbL
|
|||
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
|
||||
sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
|
||||
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||

@@ -0,0 +1,50 @@
#!/usr/bin/env bash

# Copyright 2020 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

export GO111MODULE=on
export K8S_VERSION="${1:-v0.17.6}"

K8S_DEPS=(
  "k8s.io/api"
  "k8s.io/apiextensions-apiserver"
  "k8s.io/apimachinery"
  "k8s.io/cli-runtime"
  "k8s.io/client-go"
  "k8s.io/code-generator"
)

function update_module {
  local dep="${1}"
  local version="${2}"


  echo "Updating ${dep} to ${version}"

  go mod edit \
    -require="${dep}@${version}" \
    -replace="${dep}=${dep}@${version}"
}

for dep in "${K8S_DEPS[@]}"
do
  update_module "${dep}" "${K8S_VERSION}"
done

./hack/update-deps.sh
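
Run with no argument, the script pins the six k8s.io modules listed in K8S_DEPS to the default v0.17.6; passing a version as the first argument (for example v0.18.0) pins them to that version instead, and the final ./hack/update-deps.sh call re-vendors the tree afterwards.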

@@ -0,0 +1,11 @@
language: go

go:
  - "1.8"
  - "1.9"
  - "1.10"
  - "1.11"
  - "1.12"
  - master

script: go test -v ./...

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,7 @@
This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package.

It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license.

The following additional features are supported:
* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called.
* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers.
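
The two bullet points above are the reason this fork gets vendored: with RememberWidths set, column widths survive Flush(), so tables that are rendered in several batches stay aligned. A minimal sketch of that behaviour, assuming this is the github.com/liggitt/tabwriter module that the go.sum changes above now reference (the flag and method names are taken from the source file that follows):

package main

import (
	"fmt"
	"os"

	// Import path assumed from the go.sum entry added above; the vendored
	// sources themselves carry no path in this mirror.
	"github.com/liggitt/tabwriter"
)

func main() {
	// Same parameters as the standard text/tabwriter NewWriter, plus the
	// fork-specific RememberWidths flag.
	w := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)

	fmt.Fprintln(w, "NAME\tURL\tREADY")
	fmt.Fprintln(w, "hello\thttp://hello.default.example.com\tTrue")
	w.Flush()

	// The standard tabwriter recomputes (possibly narrower) columns after a
	// Flush; this fork keeps the maximum widths seen so far, so rows written
	// later stay aligned with the header printed above.
	fmt.Fprintln(w, "svc\thttp://svc.default.example.com\tTrue")
	w.Flush()

	// Remembered widths can also be transferred to a fresh writer.
	w2 := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
	w2.SetRememberedWidths(w.RememberedWidths())
	fmt.Fprintln(w2, "another\thttp://another.example.com\tFalse")
	w2.Flush()
}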

@@ -0,0 +1,637 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tabwriter implements a write filter (tabwriter.Writer) that
|
||||
// translates tabbed columns in input into properly aligned text.
|
||||
//
|
||||
// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
|
||||
// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
|
||||
// with support for additional features.
|
||||
//
|
||||
// The package is using the Elastic Tabstops algorithm described at
|
||||
// http://nickgravgaard.com/elastictabstops/index.html.
|
||||
package tabwriter
|
||||
|
||||
import (
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Filter implementation
|
||||
|
||||
// A cell represents a segment of text terminated by tabs or line breaks.
|
||||
// The text itself is stored in a separate buffer; cell only describes the
|
||||
// segment's size in bytes, its width in runes, and whether it's an htab
|
||||
// ('\t') terminated cell.
|
||||
//
|
||||
type cell struct {
|
||||
size int // cell size in bytes
|
||||
width int // cell width in runes
|
||||
htab bool // true if the cell is terminated by an htab ('\t')
|
||||
}
|
||||
|
||||
// A Writer is a filter that inserts padding around tab-delimited
|
||||
// columns in its input to align them in the output.
|
||||
//
|
||||
// The Writer treats incoming bytes as UTF-8-encoded text consisting
|
||||
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
|
||||
// and newline ('\n') or formfeed ('\f') characters; both newline and
|
||||
// formfeed act as line breaks.
|
||||
//
|
||||
// Tab-terminated cells in contiguous lines constitute a column. The
|
||||
// Writer inserts padding as needed to make all cells in a column have
|
||||
// the same width, effectively aligning the columns. It assumes that
|
||||
// all characters have the same width, except for tabs for which a
|
||||
// tabwidth must be specified. Column cells must be tab-terminated, not
|
||||
// tab-separated: non-tab terminated trailing text at the end of a line
|
||||
// forms a cell but that cell is not part of an aligned column.
|
||||
// For instance, in this example (where | stands for a horizontal tab):
|
||||
//
|
||||
// aaaa|bbb|d
|
||||
// aa |b |dd
|
||||
// a |
|
||||
// aa |cccc|eee
|
||||
//
|
||||
// the b and c are in distinct columns (the b column is not contiguous
|
||||
// all the way). The d and e are not in a column at all (there's no
|
||||
// terminating tab, nor would the column be contiguous).
|
||||
//
|
||||
// The Writer assumes that all Unicode code points have the same width;
|
||||
// this may not be true in some fonts or if the string contains combining
|
||||
// characters.
|
||||
//
|
||||
// If DiscardEmptyColumns is set, empty columns that are terminated
|
||||
// entirely by vertical (or "soft") tabs are discarded. Columns
|
||||
// terminated by horizontal (or "hard") tabs are not affected by
|
||||
// this flag.
|
||||
//
|
||||
// If a Writer is configured to filter HTML, HTML tags and entities
|
||||
// are passed through. The widths of tags and entities are
|
||||
// assumed to be zero (tags) and one (entities) for formatting purposes.
|
||||
//
|
||||
// A segment of text may be escaped by bracketing it with Escape
|
||||
// characters. The tabwriter passes escaped text segments through
|
||||
// unchanged. In particular, it does not interpret any tabs or line
|
||||
// breaks within the segment. If the StripEscape flag is set, the
|
||||
// Escape characters are stripped from the output; otherwise they
|
||||
// are passed through as well. For the purpose of formatting, the
|
||||
// width of the escaped text is always computed excluding the Escape
|
||||
// characters.
|
||||
//
|
||||
// The formfeed character acts like a newline but it also terminates
|
||||
// all columns in the current line (effectively calling Flush). Tab-
|
||||
// terminated cells in the next line start new columns. Unless found
|
||||
// inside an HTML tag or inside an escaped text segment, formfeed
|
||||
// characters appear as newlines in the output.
|
||||
//
|
||||
// The Writer must buffer input internally, because proper spacing
|
||||
// of one line may depend on the cells in future lines. Clients must
|
||||
// call Flush when done calling Write.
|
||||
//
|
||||
type Writer struct {
|
||||
// configuration
|
||||
output io.Writer
|
||||
minwidth int
|
||||
tabwidth int
|
||||
padding int
|
||||
padbytes [8]byte
|
||||
flags uint
|
||||
|
||||
// current state
|
||||
buf []byte // collected text excluding tabs or line breaks
|
||||
pos int // buffer position up to which cell.width of incomplete cell has been computed
|
||||
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
|
||||
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
|
||||
lines [][]cell // list of lines; each line is a list of cells
|
||||
widths []int // list of column widths in runes - re-used during formatting
|
||||
|
||||
maxwidths []int // list of max column widths in runes
|
||||
}
|
||||
|
||||
// addLine adds a new line.
|
||||
// flushed is a hint indicating whether the underlying writer was just flushed.
|
||||
// If so, the previous line is not likely to be a good indicator of the new line's cells.
|
||||
func (b *Writer) addLine(flushed bool) {
|
||||
// Grow slice instead of appending,
|
||||
// as that gives us an opportunity
|
||||
// to re-use an existing []cell.
|
||||
if n := len(b.lines) + 1; n <= cap(b.lines) {
|
||||
b.lines = b.lines[:n]
|
||||
b.lines[n-1] = b.lines[n-1][:0]
|
||||
} else {
|
||||
b.lines = append(b.lines, nil)
|
||||
}
|
||||
|
||||
if !flushed {
|
||||
// The previous line is probably a good indicator
|
||||
// of how many cells the current line will have.
|
||||
// If the current line's capacity is smaller than that,
|
||||
// abandon it and make a new one.
|
||||
if n := len(b.lines); n >= 2 {
|
||||
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
|
||||
b.lines[n-1] = make([]cell, 0, prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the current state.
|
||||
func (b *Writer) reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.pos = 0
|
||||
b.cell = cell{}
|
||||
b.endChar = 0
|
||||
b.lines = b.lines[0:0]
|
||||
b.widths = b.widths[0:0]
|
||||
b.addLine(true)
|
||||
}
|
||||
|
||||
// Internal representation (current state):
|
||||
//
|
||||
// - all text written is appended to buf; tabs and line breaks are stripped away
|
||||
// - at any given time there is a (possibly empty) incomplete cell at the end
|
||||
// (the cell starts after a tab or line break)
|
||||
// - cell.size is the number of bytes belonging to the cell so far
|
||||
// - cell.width is text width in runes of that cell from the start of the cell to
|
||||
// position pos; html tags and entities are excluded from this width if html
|
||||
// filtering is enabled
|
||||
// - the sizes and widths of processed text are kept in the lines list
|
||||
// which contains a list of cells for each line
|
||||
// - the widths list is a temporary list with current widths used during
|
||||
// formatting; it is kept in Writer because it's re-used
|
||||
//
|
||||
// |<---------- size ---------->|
|
||||
// | |
|
||||
// |<- width ->|<- ignored ->| |
|
||||
// | | | |
|
||||
// [---processed---tab------------<tag>...</tag>...]
|
||||
// ^ ^ ^
|
||||
// | | |
|
||||
// buf start of incomplete cell pos
|
||||
|
||||
// Formatting can be controlled with these flags.
|
||||
const (
|
||||
// Ignore html tags and treat entities (starting with '&'
|
||||
// and ending in ';') as single characters (width = 1).
|
||||
FilterHTML uint = 1 << iota
|
||||
|
||||
// Strip Escape characters bracketing escaped text segments
|
||||
// instead of passing them through unchanged with the text.
|
||||
StripEscape
|
||||
|
||||
// Force right-alignment of cell content.
|
||||
// Default is left-alignment.
|
||||
AlignRight
|
||||
|
||||
// Handle empty columns as if they were not present in
|
||||
// the input in the first place.
|
||||
DiscardEmptyColumns
|
||||
|
||||
// Always use tabs for indentation columns (i.e., padding of
|
||||
// leading empty cells on the left) independent of padchar.
|
||||
TabIndent
|
||||
|
||||
// Print a vertical bar ('|') between columns (after formatting).
|
||||
// Discarded columns appear as zero-width columns ("||").
|
||||
Debug
|
||||
|
||||
// Remember maximum widths seen per column even after Flush() is called.
|
||||
RememberWidths
|
||||
)
|
||||
|
||||
// A Writer must be initialized with a call to Init. The first parameter (output)
|
||||
// specifies the filter output. The remaining parameters control the formatting:
|
||||
//
|
||||
// minwidth minimal cell width including any padding
|
||||
// tabwidth width of tab characters (equivalent number of spaces)
|
||||
// padding padding added to a cell before computing its width
|
||||
// padchar ASCII char used for padding
|
||||
// if padchar == '\t', the Writer will assume that the
|
||||
// width of a '\t' in the formatted output is tabwidth,
|
||||
// and cells are left-aligned independent of align_left
|
||||
// (for correct-looking results, tabwidth must correspond
|
||||
// to the tab width in the viewer displaying the result)
|
||||
// flags formatting control
|
||||
//
|
||||
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
if minwidth < 0 || tabwidth < 0 || padding < 0 {
|
||||
panic("negative minwidth, tabwidth, or padding")
|
||||
}
|
||||
b.output = output
|
||||
b.minwidth = minwidth
|
||||
b.tabwidth = tabwidth
|
||||
b.padding = padding
|
||||
for i := range b.padbytes {
|
||||
b.padbytes[i] = padchar
|
||||
}
|
||||
if padchar == '\t' {
|
||||
// tab padding enforces left-alignment
|
||||
flags &^= AlignRight
|
||||
}
|
||||
b.flags = flags
|
||||
|
||||
b.reset()
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// debugging support (keep code around)
|
||||
func (b *Writer) dump() {
|
||||
pos := 0
|
||||
for i, line := range b.lines {
|
||||
print("(", i, ") ")
|
||||
for _, c := range line {
|
||||
print("[", string(b.buf[pos:pos+c.size]), "]")
|
||||
pos += c.size
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
|
||||
// local error wrapper so we can distinguish errors we want to return
|
||||
// as errors from genuine panics (which we don't want to return as errors)
|
||||
type osError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *Writer) write0(buf []byte) {
|
||||
n, err := b.output.Write(buf)
|
||||
if n != len(buf) && err == nil {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
panic(osError{err})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Writer) writeN(src []byte, n int) {
|
||||
for n > len(src) {
|
||||
b.write0(src)
|
||||
n -= len(src)
|
||||
}
|
||||
b.write0(src[0:n])
|
||||
}
|
||||
|
||||
var (
|
||||
newline = []byte{'\n'}
|
||||
tabs = []byte("\t\t\t\t\t\t\t\t")
|
||||
)
|
||||
|
||||
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
|
||||
if b.padbytes[0] == '\t' || useTabs {
|
||||
// padding is done with tabs
|
||||
if b.tabwidth == 0 {
|
||||
return // tabs have no width - can't do any padding
|
||||
}
|
||||
// make cellw the smallest multiple of b.tabwidth
|
||||
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
|
||||
n := cellw - textw // amount of padding
|
||||
if n < 0 {
|
||||
panic("internal error")
|
||||
}
|
||||
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
|
||||
return
|
||||
}
|
||||
|
||||
// padding is done with non-tab characters
|
||||
b.writeN(b.padbytes[0:], cellw-textw)
|
||||
}
|
||||
|
||||
var vbar = []byte{'|'}
|
||||
|
||||
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
for i := line0; i < line1; i++ {
|
||||
line := b.lines[i]
|
||||
|
||||
// if TabIndent is set, use tabs to pad leading empty cells
|
||||
useTabs := b.flags&TabIndent != 0
|
||||
|
||||
for j, c := range line {
|
||||
if j > 0 && b.flags&Debug != 0 {
|
||||
// indicate column break
|
||||
b.write0(vbar)
|
||||
}
|
||||
|
||||
if c.size == 0 {
|
||||
// empty cell
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], useTabs)
|
||||
}
|
||||
} else {
|
||||
// non-empty cell
|
||||
useTabs = false
|
||||
if b.flags&AlignRight == 0 { // align left
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
} else { // align right
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i+1 == len(b.lines) {
|
||||
// last buffered line - we don't have a newline, so just write
|
||||
// any outstanding buffered data
|
||||
b.write0(b.buf[pos : pos+b.cell.size])
|
||||
pos += b.cell.size
|
||||
} else {
|
||||
// not the last line - write newline
|
||||
b.write0(newline)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Format the text between line0 and line1 (excluding line1); pos
|
||||
// is the buffer position corresponding to the beginning of line0.
|
||||
// Returns the buffer position corresponding to the beginning of
|
||||
// line1 and an error, if any.
|
||||
//
|
||||
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
column := len(b.widths)
|
||||
for this := line0; this < line1; this++ {
|
||||
line := b.lines[this]
|
||||
|
||||
if column >= len(line)-1 {
|
||||
continue
|
||||
}
|
||||
// cell exists in this column => this line
|
||||
// has more cells than the previous line
|
||||
// (the last cell per line is ignored because cells are
|
||||
// tab-terminated; the last cell per line describes the
|
||||
// text before the newline/formfeed and does not belong
|
||||
// to a column)
|
||||
|
||||
// print unprinted lines until beginning of block
|
||||
pos = b.writeLines(pos, line0, this)
|
||||
line0 = this
|
||||
|
||||
// column block begin
|
||||
width := b.minwidth // minimal column width
|
||||
discardable := true // true if all cells in this column are empty and "soft"
|
||||
for ; this < line1; this++ {
|
||||
line = b.lines[this]
|
||||
if column >= len(line)-1 {
|
||||
break
|
||||
}
|
||||
// cell exists in this column
|
||||
c := line[column]
|
||||
// update width
|
||||
if w := c.width + b.padding; w > width {
|
||||
width = w
|
||||
}
|
||||
// update discardable
|
||||
if c.width > 0 || c.htab {
|
||||
discardable = false
|
||||
}
|
||||
}
|
||||
// column block end
|
||||
|
||||
// discard empty columns if necessary
|
||||
if discardable && b.flags&DiscardEmptyColumns != 0 {
|
||||
width = 0
|
||||
}
|
||||
|
||||
if b.flags&RememberWidths != 0 {
|
||||
if len(b.maxwidths) < len(b.widths) {
|
||||
b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(b.maxwidths) == len(b.widths):
|
||||
b.maxwidths = append(b.maxwidths, width)
|
||||
case b.maxwidths[len(b.widths)] > width:
|
||||
width = b.maxwidths[len(b.widths)]
|
||||
case b.maxwidths[len(b.widths)] < width:
|
||||
b.maxwidths[len(b.widths)] = width
|
||||
}
|
||||
}
|
||||
|
||||
// format and print all columns to the right of this column
|
||||
// (we know the widths of this column and all columns to the left)
|
||||
b.widths = append(b.widths, width) // push width
|
||||
pos = b.format(pos, line0, this)
|
||||
b.widths = b.widths[0 : len(b.widths)-1] // pop width
|
||||
line0 = this
|
||||
}
|
||||
|
||||
// print unprinted lines until end
|
||||
return b.writeLines(pos, line0, line1)
|
||||
}
|
||||
|
||||
// Append text to current cell.
|
||||
func (b *Writer) append(text []byte) {
|
||||
b.buf = append(b.buf, text...)
|
||||
b.cell.size += len(text)
|
||||
}
|
||||
|
||||
// Update the cell width.
|
||||
func (b *Writer) updateWidth() {
|
||||
b.cell.width += utf8.RuneCount(b.buf[b.pos:])
|
||||
b.pos = len(b.buf)
|
||||
}
|
||||
|
||||
// To escape a text segment, bracket it with Escape characters.
|
||||
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
|
||||
// does not terminate a cell and constitutes a single character of
|
||||
// width one for formatting purposes.
|
||||
//
|
||||
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
|
||||
//
|
||||
const Escape = '\xff'
|
||||
|
||||
// Start escaped mode.
|
||||
func (b *Writer) startEscape(ch byte) {
|
||||
switch ch {
|
||||
case Escape:
|
||||
b.endChar = Escape
|
||||
case '<':
|
||||
b.endChar = '>'
|
||||
case '&':
|
||||
b.endChar = ';'
|
||||
}
|
||||
}
|
||||
|
||||
// Terminate escaped mode. If the escaped text was an HTML tag, its width
|
||||
// is assumed to be zero for formatting purposes; if it was an HTML entity,
|
||||
// its width is assumed to be one. In all other cases, the width is the
|
||||
// unicode width of the text.
|
||||
//
|
||||
func (b *Writer) endEscape() {
|
||||
switch b.endChar {
|
||||
case Escape:
|
||||
b.updateWidth()
|
||||
if b.flags&StripEscape == 0 {
|
||||
b.cell.width -= 2 // don't count the Escape chars
|
||||
}
|
||||
case '>': // tag of zero width
|
||||
case ';':
|
||||
b.cell.width++ // entity, count as one rune
|
||||
}
|
||||
b.pos = len(b.buf)
|
||||
b.endChar = 0
|
||||
}
|
||||
|
||||
// Terminate the current cell by adding it to the list of cells of the
|
||||
// current line. Returns the number of cells in that line.
|
||||
//
|
||||
func (b *Writer) terminateCell(htab bool) int {
|
||||
b.cell.htab = htab
|
||||
line := &b.lines[len(b.lines)-1]
|
||||
*line = append(*line, b.cell)
|
||||
b.cell = cell{}
|
||||
return len(*line)
|
||||
}
|
||||
|
||||
func handlePanic(err *error, op string) {
|
||||
if e := recover(); e != nil {
|
||||
if nerr, ok := e.(osError); ok {
|
||||
*err = nerr.err
|
||||
return
|
||||
}
|
||||
panic("tabwriter: panic during " + op)
|
||||
}
|
||||
}
|
||||
|
||||
// RememberedWidths returns a copy of the remembered per-column maximum widths.
|
||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
||||
func (b *Writer) RememberedWidths() []int {
|
||||
retval := make([]int, len(b.maxwidths))
|
||||
copy(retval, b.maxwidths)
|
||||
return retval
|
||||
}
|
||||
|
||||
// SetRememberedWidths sets the remembered per-column maximum widths.
|
||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
||||
func (b *Writer) SetRememberedWidths(widths []int) *Writer {
|
||||
b.maxwidths = make([]int, len(widths))
|
||||
copy(b.maxwidths, widths)
|
||||
return b
|
||||
}
|
||||
|
||||
// Flush should be called after the last call to Write to ensure
|
||||
// that any data buffered in the Writer is written to output. Any
|
||||
// incomplete escape sequence at the end is considered
|
||||
// complete for formatting purposes.
|
||||
func (b *Writer) Flush() error {
|
||||
return b.flush()
|
||||
}
|
||||
|
||||
func (b *Writer) flush() (err error) {
|
||||
defer b.reset() // even in the presence of errors
|
||||
defer handlePanic(&err, "Flush")
|
||||
|
||||
// add current cell if not empty
|
||||
if b.cell.size > 0 {
|
||||
if b.endChar != 0 {
|
||||
// inside escape - terminate it even if incomplete
|
||||
b.endEscape()
|
||||
}
|
||||
b.terminateCell(false)
|
||||
}
|
||||
|
||||
// format contents of buffer
|
||||
b.format(0, 0, len(b.lines))
|
||||
return nil
|
||||
}
|
||||
|
||||
var hbar = []byte("---\n")
|
||||
|
||||
// Write writes buf to the writer b.
|
||||
// The only errors returned are ones encountered
|
||||
// while writing to the underlying output stream.
|
||||
//
|
||||
func (b *Writer) Write(buf []byte) (n int, err error) {
|
||||
defer handlePanic(&err, "Write")
|
||||
|
||||
// split text into cells
|
||||
n = 0
|
||||
for i, ch := range buf {
|
||||
if b.endChar == 0 {
|
||||
// outside escape
|
||||
switch ch {
|
||||
case '\t', '\v', '\n', '\f':
|
||||
// end of cell
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i + 1 // ch consumed
|
||||
ncells := b.terminateCell(ch == '\t')
|
||||
if ch == '\n' || ch == '\f' {
|
||||
// terminate line
|
||||
b.addLine(ch == '\f')
|
||||
if ch == '\f' || ncells == 1 {
|
||||
// A '\f' always forces a flush. Otherwise, if the previous
|
||||
// line has only one cell which does not have an impact on
|
||||
// the formatting of the following lines (the last cell per
|
||||
// line is ignored by format()), thus we can flush the
|
||||
// Writer contents.
|
||||
if err = b.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
if ch == '\f' && b.flags&Debug != 0 {
|
||||
// indicate section break
|
||||
b.write0(hbar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case Escape:
|
||||
// start of escaped sequence
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
if b.flags&StripEscape != 0 {
|
||||
n++ // strip Escape
|
||||
}
|
||||
b.startEscape(Escape)
|
||||
|
||||
case '<', '&':
|
||||
// possibly an html tag/entity
|
||||
if b.flags&FilterHTML != 0 {
|
||||
// begin of tag/entity
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
b.startEscape(ch)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// inside escape
|
||||
if ch == b.endChar {
|
||||
// end of tag/entity
|
||||
j := i + 1
|
||||
if ch == Escape && b.flags&StripEscape != 0 {
|
||||
j = i // strip Escape
|
||||
}
|
||||
b.append(buf[n:j])
|
||||
n = i + 1 // ch consumed
|
||||
b.endEscape()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append leftover text
|
||||
b.append(buf[n:])
|
||||
n = len(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewWriter allocates and initializes a new tabwriter.Writer.
|
||||
// The parameters are the same as for the Init function.
|
||||
//
|
||||
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
|
||||
}
|
||||

@@ -131,7 +131,7 @@ message MutatingWebhook {
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
 
-  // SideEffects states whether this webhookk has side effects.
+  // SideEffects states whether this webhook has side effects.
   // Acceptable values are: Unknown, None, Some, NoneOnDryRun
   // Webhooks with side effects MUST implement a reconciliation system, since a request may be
   // rejected by a future step in the admission change and the side effects therefore need to be undone.
@@ -385,7 +385,7 @@ message ValidatingWebhook {
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
 
-  // SideEffects states whether this webhookk has side effects.
+  // SideEffects states whether this webhook has side effects.
   // Acceptable values are: Unknown, None, Some, NoneOnDryRun
   // Webhooks with side effects MUST implement a reconciliation system, since a request may be
   // rejected by a future step in the admission change and the side effects therefore need to be undone.

@@ -275,7 +275,7 @@ type ValidatingWebhook struct {
 	// +optional
 	ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"`
 
-	// SideEffects states whether this webhookk has side effects.
+	// SideEffects states whether this webhook has side effects.
 	// Acceptable values are: Unknown, None, Some, NoneOnDryRun
 	// Webhooks with side effects MUST implement a reconciliation system, since a request may be
 	// rejected by a future step in the admission change and the side effects therefore need to be undone.
@@ -407,7 +407,7 @@ type MutatingWebhook struct {
 	// +optional
 	ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"`
 
-	// SideEffects states whether this webhookk has side effects.
+	// SideEffects states whether this webhook has side effects.
 	// Acceptable values are: Unknown, None, Some, NoneOnDryRun
 	// Webhooks with side effects MUST implement a reconciliation system, since a request may be
 	// rejected by a future step in the admission change and the side effects therefore need to be undone.

@ -36,7 +36,7 @@ var map_MutatingWebhook = map[string]string{
|
|||
"matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"",
|
||||
"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
|
||||
"objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
|
||||
"sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
|
||||
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
|
||||
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
|
||||
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
|
||||
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
|
||||
|
|
@ -108,7 +108,7 @@ var map_ValidatingWebhook = map[string]string{
|
|||
"matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"",
|
||||
"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
|
||||
"objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
|
||||
"sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
|
||||
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
|
||||
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
|
||||
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
|
||||
}
@ -151,7 +151,7 @@ type ScaleStatus struct {
|
|||
// MetricSourceType indicates the type of metric.
|
||||
type MetricSourceType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ObjectMetricSourceType is a metric describing a kubernetes object
|
||||
// (for example, hits-per-second on an Ingress object).
|
||||
ObjectMetricSourceType MetricSourceType = "Object"
|
||||
|
|
@ -322,7 +322,7 @@ type MetricStatus struct {
|
|||
// a HorizontalPodAutoscaler.
|
||||
type HorizontalPodAutoscalerConditionType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ScalingActive indicates that the HPA controller is able to scale if necessary:
|
||||
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
|
||||
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
@ -62,7 +62,7 @@ type HorizontalPodAutoscalerSpec struct {
|
|||
// MetricSourceType indicates the type of metric.
|
||||
type MetricSourceType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ObjectMetricSourceType is a metric describing a kubernetes object
|
||||
// (for example, hits-per-second on an Ingress object).
|
||||
ObjectMetricSourceType MetricSourceType = "Object"
|
||||
|
|
@ -231,7 +231,7 @@ type HorizontalPodAutoscalerStatus struct {
|
|||
// a HorizontalPodAutoscaler.
|
||||
type HorizontalPodAutoscalerConditionType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ScalingActive indicates that the HPA controller is able to scale if necessary:
|
||||
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
|
||||
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
@ -120,7 +120,7 @@ type MetricSpec struct {
|
|||
// MetricSourceType indicates the type of metric.
|
||||
type MetricSourceType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ObjectMetricSourceType is a metric describing a kubernetes object
|
||||
// (for example, hits-per-second on an Ingress object).
|
||||
ObjectMetricSourceType MetricSourceType = "Object"
|
||||
|
|
@ -221,7 +221,7 @@ type MetricTarget struct {
|
|||
// "Value", "AverageValue", or "Utilization"
|
||||
type MetricTargetType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// UtilizationMetricType declares a MetricTarget is an AverageUtilization value
|
||||
UtilizationMetricType MetricTargetType = "Utilization"
|
||||
// ValueMetricType declares a MetricTarget is a raw value
|
||||
|
|
@ -262,7 +262,7 @@ type HorizontalPodAutoscalerStatus struct {
|
|||
// a HorizontalPodAutoscaler.
|
||||
type HorizontalPodAutoscalerConditionType string
|
||||
|
||||
var (
|
||||
const (
|
||||
// ScalingActive indicates that the HPA controller is able to scale if necessary:
|
||||
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
|
||||
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
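Editor's aside (not part of the diff): a hedged sketch that uses the ObjectMetricSourceType and MetricTarget constants from the autoscaling hunks above to describe an Ingress-based metric; the object and metric names are illustrative.

package hpaexample

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/api/resource"
)

// objectMetric targets 100 hits-per-second on a named Ingress, the example the
// ObjectMetricSourceType comment above alludes to.
func objectMetric() autoscalingv2beta2.MetricSpec {
	return autoscalingv2beta2.MetricSpec{
		Type: autoscalingv2beta2.ObjectMetricSourceType,
		Object: &autoscalingv2beta2.ObjectMetricSource{
			DescribedObject: autoscalingv2beta2.CrossVersionObjectReference{
				Kind:       "Ingress",
				Name:       "main-route", // hypothetical
				APIVersion: "networking.k8s.io/v1beta1",
			},
			Metric: autoscalingv2beta2.MetricIdentifier{Name: "hits-per-second"},
			Target: autoscalingv2beta2.MetricTarget{
				Type:  autoscalingv2beta2.ValueMetricType,
				Value: resource.NewQuantity(100, resource.DecimalSI),
			},
		},
	}
}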
@ -129,27 +129,27 @@ type CertificateSigningRequestList struct {
|
|||
type KeyUsage string
|
||||
|
||||
const (
|
||||
UsageSigning KeyUsage = "signing"
|
||||
UsageDigitalSignature KeyUsage = "digital signature"
|
||||
UsageContentCommittment KeyUsage = "content commitment"
|
||||
UsageKeyEncipherment KeyUsage = "key encipherment"
|
||||
UsageKeyAgreement KeyUsage = "key agreement"
|
||||
UsageDataEncipherment KeyUsage = "data encipherment"
|
||||
UsageCertSign KeyUsage = "cert sign"
|
||||
UsageCRLSign KeyUsage = "crl sign"
|
||||
UsageEncipherOnly KeyUsage = "encipher only"
|
||||
UsageDecipherOnly KeyUsage = "decipher only"
|
||||
UsageAny KeyUsage = "any"
|
||||
UsageServerAuth KeyUsage = "server auth"
|
||||
UsageClientAuth KeyUsage = "client auth"
|
||||
UsageCodeSigning KeyUsage = "code signing"
|
||||
UsageEmailProtection KeyUsage = "email protection"
|
||||
UsageSMIME KeyUsage = "s/mime"
|
||||
UsageIPsecEndSystem KeyUsage = "ipsec end system"
|
||||
UsageIPsecTunnel KeyUsage = "ipsec tunnel"
|
||||
UsageIPsecUser KeyUsage = "ipsec user"
|
||||
UsageTimestamping KeyUsage = "timestamping"
|
||||
UsageOCSPSigning KeyUsage = "ocsp signing"
|
||||
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
|
||||
UsageNetscapSGC KeyUsage = "netscape sgc"
|
||||
UsageSigning KeyUsage = "signing"
|
||||
UsageDigitalSignature KeyUsage = "digital signature"
|
||||
UsageContentCommitment KeyUsage = "content commitment"
|
||||
UsageKeyEncipherment KeyUsage = "key encipherment"
|
||||
UsageKeyAgreement KeyUsage = "key agreement"
|
||||
UsageDataEncipherment KeyUsage = "data encipherment"
|
||||
UsageCertSign KeyUsage = "cert sign"
|
||||
UsageCRLSign KeyUsage = "crl sign"
|
||||
UsageEncipherOnly KeyUsage = "encipher only"
|
||||
UsageDecipherOnly KeyUsage = "decipher only"
|
||||
UsageAny KeyUsage = "any"
|
||||
UsageServerAuth KeyUsage = "server auth"
|
||||
UsageClientAuth KeyUsage = "client auth"
|
||||
UsageCodeSigning KeyUsage = "code signing"
|
||||
UsageEmailProtection KeyUsage = "email protection"
|
||||
UsageSMIME KeyUsage = "s/mime"
|
||||
UsageIPsecEndSystem KeyUsage = "ipsec end system"
|
||||
UsageIPsecTunnel KeyUsage = "ipsec tunnel"
|
||||
UsageIPsecUser KeyUsage = "ipsec user"
|
||||
UsageTimestamping KeyUsage = "timestamping"
|
||||
UsageOCSPSigning KeyUsage = "ocsp signing"
|
||||
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
|
||||
UsageNetscapeSGC KeyUsage = "netscape sgc"
|
||||
)
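Editor's aside (not part of the diff): the renamed constants above (UsageContentCommitment, UsageNetscapeSGC) live in k8s.io/api/certificates/v1beta1; a small hedged sketch of usages typically requested for a TLS serving certificate.

package csrexample

import certsv1beta1 "k8s.io/api/certificates/v1beta1"

// serverCertUsages is a typical usage set for a TLS serving certificate,
// spelled with the corrected constant names introduced in this hunk.
var serverCertUsages = []certsv1beta1.KeyUsage{
	certsv1beta1.UsageDigitalSignature,
	certsv1beta1.UsageKeyEncipherment,
	certsv1beta1.UsageServerAuth,
}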
File diff suppressed because it is too large
@ -1142,7 +1142,7 @@ message EnvVar {
|
|||
// EnvVarSource represents a source for the value of an EnvVar.
|
||||
message EnvVarSource {
|
||||
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
|
||||
// +optional
|
||||
optional ObjectFieldSelector fieldRef = 1;
|
||||
|
||||
|
|
@ -3145,6 +3145,15 @@ message PodLogOptions {
|
|||
// slightly more or slightly less than the specified limit.
|
||||
// +optional
|
||||
optional int64 limitBytes = 8;
|
||||
|
||||
// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
|
||||
// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
|
||||
// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
|
||||
// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
|
||||
// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
|
||||
// the actual log data coming from the real kubelet).
|
||||
// +optional
|
||||
optional bool insecureSkipTLSVerifyBackend = 9;
|
||||
}
|
||||
|
||||
// PodPortForwardOptions is the query options to a Pod's port forward call
|
||||
|
|
@ -3375,7 +3384,6 @@ message PodSpec {
|
|||
// in the same pod, and the first process in each container will not be assigned PID 1.
|
||||
// HostPID and ShareProcessNamespace cannot both be set.
|
||||
// Optional: Default to false.
|
||||
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
|
||||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
optional bool shareProcessNamespace = 27;
|
||||
|
|
@ -4733,6 +4741,21 @@ message ServiceSpec {
|
|||
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
|
||||
// +optional
|
||||
optional string ipFamily = 15;
|
||||
|
||||
// topologyKeys is a preference-order list of topology keys which
|
||||
// implementations of services should use to preferentially sort endpoints
|
||||
// when accessing this Service, it can not be used at the same time as
|
||||
// externalTrafficPolicy=Local.
|
||||
// Topology keys must be valid label keys and at most 16 keys may be specified.
|
||||
// Endpoints are chosen based on the first topology key with available backends.
|
||||
// If this field is specified and all entries have no backends that match
|
||||
// the topology of the client, the service has no backends for that client
|
||||
// and connections should fail.
|
||||
// The special value "*" may be used to mean "any topology". This catch-all
|
||||
// value, if used, only makes sense as the last value in the list.
|
||||
// If this is not specified or empty, no topology constraints will be applied.
|
||||
// +optional
|
||||
repeated string topologyKeys = 16;
|
||||
}
|
||||
|
||||
// ServiceStatus represents the current status of a service.
|
||||
|
|
@ -5032,7 +5055,6 @@ message VolumeMount {
|
|||
// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
|
||||
// Defaults to "" (volume's root).
|
||||
// SubPathExpr and SubPath are mutually exclusive.
|
||||
// This field is beta in 1.15.
|
||||
// +optional
|
||||
optional string subPathExpr = 6;
|
||||
}
|
||||
|
|
@ -5249,7 +5271,7 @@ message WindowsSecurityContextOptions {
|
|||
// Defaults to the user specified in image metadata if unspecified.
|
||||
// May also be set in PodSecurityContext. If set in both SecurityContext and
|
||||
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
|
||||
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
|
||||
// +optional
|
||||
optional string runAsUserName = 3;
|
||||
}
@ -30,6 +30,8 @@ const (
	NamespaceAll string = ""
	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
	NamespaceNodeLease string = "kube-node-lease"
	// TopologyKeyAny is the service topology key that matches any node
	TopologyKeyAny string = "*"
)

// Volume represents a named volume in a pod that may be accessed by any container in the pod.
@ -1784,7 +1786,6 @@ type VolumeMount struct {
|
|||
// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
|
||||
// Defaults to "" (volume's root).
|
||||
// SubPathExpr and SubPath are mutually exclusive.
|
||||
// This field is beta in 1.15.
|
||||
// +optional
|
||||
SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
|
||||
}
|
||||
|
|
@ -1847,7 +1848,7 @@ type EnvVar struct {
|
|||
// EnvVarSource represents a source for the value of an EnvVar.
|
||||
type EnvVarSource struct {
|
||||
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
|
||||
// +optional
|
||||
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
|
||||
// Selects a resource of the container: only resources limits and requests
|
||||
|
|
@ -2941,7 +2942,6 @@ type PodSpec struct {
|
|||
// in the same pod, and the first process in each container will not be assigned PID 1.
|
||||
// HostPID and ShareProcessNamespace cannot both be set.
|
||||
// Optional: Default to false.
|
||||
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
|
||||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
|
||||
|
|
@ -3828,6 +3828,8 @@ const (
	IPv4Protocol IPFamily = "IPv4"
	// IPv6Protocol indicates that this IP is IPv6 protocol
	IPv6Protocol IPFamily = "IPv6"
	// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
	MaxServiceTopologyKeys = 16
)

// ServiceSpec describes the attributes that a user creates on a service.
@ -3942,6 +3944,7 @@ type ServiceSpec struct {
|
|||
// of peer discovery.
|
||||
// +optional
|
||||
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
|
||||
|
||||
// sessionAffinityConfig contains the configurations of session affinity.
|
||||
// +optional
|
||||
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
|
||||
|
|
@ -3955,6 +3958,21 @@ type ServiceSpec struct {
	// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
	// +optional
	IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`

	// topologyKeys is a preference-order list of topology keys which
	// implementations of services should use to preferentially sort endpoints
	// when accessing this Service, it can not be used at the same time as
	// externalTrafficPolicy=Local.
	// Topology keys must be valid label keys and at most 16 keys may be specified.
	// Endpoints are chosen based on the first topology key with available backends.
	// If this field is specified and all entries have no backends that match
	// the topology of the client, the service has no backends for that client
	// and connections should fail.
	// The special value "*" may be used to mean "any topology". This catch-all
	// value, if used, only makes sense as the last value in the list.
	// If this is not specified or empty, no topology constraints will be applied.
	// +optional
	TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
}

// ServicePort contains information on service's port.
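Editor's aside (not part of the diff): a hedged sketch of a Service using the new IPFamily and TopologyKeys fields above; the name, selector, and topology preference are illustrative assumptions.

package serviceexample

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleService prefers same-node, then same-zone endpoints, ending with the
// "*" catch-all described in the field comment above.
func exampleService() *corev1.Service {
	ipv4 := corev1.IPv4Protocol
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "hello"},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "hello"},
			Ports:    []corev1.ServicePort{{Port: 80}},
			IPFamily: &ipv4,
			TopologyKeys: []string{
				"kubernetes.io/hostname",
				"topology.kubernetes.io/zone",
				"*",
			},
		},
	}
}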
@ -4233,7 +4251,7 @@ type NodeSpec struct {
	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
	// see: https://issues.k8s.io/61966
	// +optional
	DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
	DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}

// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
@ -4660,6 +4678,12 @@ const (
|
|||
NamespaceTerminating NamespacePhase = "Terminating"
|
||||
)
|
||||
|
||||
const (
|
||||
// NamespaceTerminatingCause is returned as a defaults.cause item when a change is
|
||||
// forbidden due to the namespace being terminated.
|
||||
NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
|
||||
)
|
||||
|
||||
type NamespaceConditionType string
|
||||
|
||||
// These are valid conditions of a namespace.
|
||||
|
|
@ -4670,6 +4694,10 @@ const (
|
|||
NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure"
|
||||
// NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types.
|
||||
NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure"
|
||||
// NamespaceContentRemaining contains information about resources remaining in a namespace.
|
||||
NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining"
|
||||
// NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace.
|
||||
NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining"
|
||||
)
|
||||
|
||||
// NamespaceCondition contains details about state of namespace.
|
||||
|
|
@ -4765,6 +4793,7 @@ type Preconditions struct {
|
|||
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PodLogOptions is the query options for a Pod's logs REST call.
|
||||
|
|
@ -4805,8 +4834,18 @@ type PodLogOptions struct {
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`

	// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
	// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
	// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
	// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
	// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
	// the actual log data coming from the real kubelet).
	// +optional
	InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodAttachOptions is the query options to a Pod's remote attach call.
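Editor's aside (not part of the diff): a hedged sketch of streaming logs with the new InsecureSkipTLSVerifyBackend option via client-go 0.17, whose Request.Stream still takes no context; the clientset, namespace, and pod name are assumed for illustration.

package logsexample

import (
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// streamLogs follows a pod's logs without verifying the kubelet's serving
// certificate, mirroring the field documented above.
func streamLogs(cs kubernetes.Interface, namespace, pod string) error {
	req := cs.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{
		Follow:                       true,
		InsecureSkipTLSVerifyBackend: true,
	})
	rc, err := req.Stream() // client-go v0.17 signature; later releases take a context
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}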
@ -4844,6 +4883,7 @@ type PodAttachOptions struct {
|
|||
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PodExecOptions is the query options to a Pod's remote exec call.
|
||||
|
|
@ -4882,6 +4922,7 @@ type PodExecOptions struct {
|
|||
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PodPortForwardOptions is the query options to a Pod's port forward call
|
||||
|
|
@ -4899,6 +4940,7 @@ type PodPortForwardOptions struct {
|
|||
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PodProxyOptions is the query options to a Pod's proxy call.
|
||||
|
|
@ -4910,6 +4952,7 @@ type PodProxyOptions struct {
|
|||
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NodeProxyOptions is the query options to a Node's proxy call.
|
||||
|
|
@ -4921,6 +4964,7 @@ type NodeProxyOptions struct {
|
|||
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ServiceProxyOptions is the query options to a Service's proxy call.
|
||||
|
|
@ -5764,7 +5808,7 @@ type WindowsSecurityContextOptions struct {
|
|||
// Defaults to the user specified in image metadata if unspecified.
|
||||
// May also be set in PodSecurityContext. If set in both SecurityContext and
|
||||
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
|
||||
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
|
||||
// +optional
|
||||
RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
|
||||
}
@ -566,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_EnvVarSource = map[string]string{
|
||||
"": "EnvVarSource represents a source for the value of an EnvVar.",
|
||||
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
|
||||
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
|
||||
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
|
||||
"configMapKeyRef": "Selects a key of a ConfigMap.",
|
||||
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
|
||||
|
|
@ -1528,15 +1528,16 @@ func (PodList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_PodLogOptions = map[string]string{
|
||||
"": "PodLogOptions is the query options for a Pod's logs REST call.",
|
||||
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
|
||||
"follow": "Follow the log stream of the pod. Defaults to false.",
|
||||
"previous": "Return previous terminated container logs. Defaults to false.",
|
||||
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
|
||||
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
|
||||
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
|
||||
"": "PodLogOptions is the query options for a Pod's logs REST call.",
|
||||
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
|
||||
"follow": "Follow the log stream of the pod. Defaults to false.",
|
||||
"previous": "Return previous terminated container logs. Defaults to false.",
|
||||
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
|
||||
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
|
||||
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
|
||||
"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
|
||||
}
|
||||
|
||||
func (PodLogOptions) SwaggerDoc() map[string]string {
|
||||
|
|
@ -1613,7 +1614,7 @@ var map_PodSpec = map[string]string{
|
|||
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
||||
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
||||
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
|
||||
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
|
||||
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
|
||||
"imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
|
||||
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
|
||||
|
|
@ -2203,6 +2204,7 @@ var map_ServiceSpec = map[string]string{
|
|||
"publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
|
||||
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
|
||||
"ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.",
|
||||
"topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.",
|
||||
}
|
||||
|
||||
func (ServiceSpec) SwaggerDoc() map[string]string {
|
||||
|
|
@ -2366,7 +2368,7 @@ var map_VolumeMount = map[string]string{
|
|||
"mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.",
|
||||
"subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
|
||||
"mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
|
||||
"subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.",
|
||||
"subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.",
|
||||
}
|
||||
|
||||
func (VolumeMount) SwaggerDoc() map[string]string {
|
||||
|
|
@ -2456,7 +2458,7 @@ var map_WindowsSecurityContextOptions = map[string]string{
|
|||
"": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
|
||||
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
|
||||
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
|
||||
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.",
|
||||
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.",
|
||||
}
|
||||
|
||||
func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
@ -17,15 +17,23 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
const (
|
||||
LabelHostname = "kubernetes.io/hostname"
|
||||
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
|
||||
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
|
||||
LabelHostname = "kubernetes.io/hostname"
|
||||
|
||||
LabelInstanceType = "beta.kubernetes.io/instance-type"
|
||||
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
|
||||
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
|
||||
LabelZoneFailureDomainStable = "topology.kubernetes.io/zone"
|
||||
LabelZoneRegionStable = "topology.kubernetes.io/region"
|
||||
|
||||
LabelInstanceType = "beta.kubernetes.io/instance-type"
|
||||
LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
|
||||
|
||||
LabelOSStable = "kubernetes.io/os"
|
||||
LabelArchStable = "kubernetes.io/arch"
|
||||
|
||||
// LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0.
|
||||
// It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763)
|
||||
LabelWindowsBuild = "node.kubernetes.io/windows-build"
|
||||
|
||||
// LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
|
||||
LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
|
||||
// LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
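Editor's aside (not part of the diff): the stable topology labels added above sit alongside the deprecated failure-domain.beta.* forms; a hedged sketch pinning a pod to a zone with a nodeSelector (the region, zone, and image values are made up).

package nodeexample

import corev1 "k8s.io/api/core/v1"

// zonePinnedPodSpec selects nodes by the stable topology labels rather than
// the deprecated failure-domain.beta.kubernetes.io forms.
func zonePinnedPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		NodeSelector: map[string]string{
			corev1.LabelZoneRegionStable:        "us-east1",   // hypothetical region
			corev1.LabelZoneFailureDomainStable: "us-east1-b", // hypothetical zone
		},
		Containers: []corev1.Container{{Name: "app", Image: "example.com/app:latest"}},
	}
}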
@ -0,0 +1,55 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
const (
|
||||
// TaintNodeNotReady will be added when node is not ready
|
||||
// and feature-gate for TaintBasedEvictions flag is enabled,
|
||||
// and removed when node becomes ready.
|
||||
TaintNodeNotReady = "node.kubernetes.io/not-ready"
|
||||
|
||||
// TaintNodeUnreachable will be added when node becomes unreachable
|
||||
// (corresponding to NodeReady status ConditionUnknown)
|
||||
// and feature-gate for TaintBasedEvictions flag is enabled,
|
||||
// and removed when node becomes reachable (NodeReady status ConditionTrue).
|
||||
TaintNodeUnreachable = "node.kubernetes.io/unreachable"
|
||||
|
||||
// TaintNodeUnschedulable will be added when node becomes unschedulable
|
||||
// and feature-gate for TaintNodesByCondition flag is enabled,
|
||||
// and removed when node becomes scheduable.
|
||||
TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
|
||||
|
||||
// TaintNodeMemoryPressure will be added when node has memory pressure
|
||||
// and feature-gate for TaintNodesByCondition flag is enabled,
|
||||
// and removed when node has enough memory.
|
||||
TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
|
||||
|
||||
// TaintNodeDiskPressure will be added when node has disk pressure
|
||||
// and feature-gate for TaintNodesByCondition flag is enabled,
|
||||
// and removed when node has enough disk.
|
||||
TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
|
||||
|
||||
// TaintNodeNetworkUnavailable will be added when node's network is unavailable
|
||||
// and feature-gate for TaintNodesByCondition flag is enabled,
|
||||
// and removed when network becomes ready.
|
||||
TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
|
||||
|
||||
// TaintNodePIDPressure will be added when node has pid pressure
|
||||
// and feature-gate for TaintNodesByCondition flag is enabled,
|
||||
// and removed when node has enough disk.
|
||||
TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
|
||||
)
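Editor's aside (not part of the diff): the new well-known taint constants above are consumed as tolerations; a hedged sketch tolerating the not-ready taint for five minutes (the duration is an assumption, roughly what the default toleration admission plugin adds).

package taintexample

import corev1 "k8s.io/api/core/v1"

// notReadyToleration keeps a pod running on a node tainted with
// node.kubernetes.io/not-ready for up to five minutes before eviction.
func notReadyToleration() corev1.Toleration {
	seconds := int64(300)
	return corev1.Toleration{
		Key:               corev1.TaintNodeNotReady,
		Operator:          corev1.TolerationOpExists,
		Effect:            corev1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
}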
@ -5186,6 +5186,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
|||
*out = new(IPFamily)
|
||||
**out = **in
|
||||
}
|
||||
if in.TopologyKeys != nil {
|
||||
in, out := &in.TopologyKeys, &out.TopologyKeys
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
@ -200,53 +200,54 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_772f83c5b34e07a5 = []byte{
|
||||
// 728 bytes of a gzipped FileDescriptorProto
|
||||
// 746 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a,
|
||||
0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2,
|
||||
0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e,
|
||||
0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20,
|
||||
0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57,
|
||||
0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 0x83,
|
||||
0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22,
|
||||
0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26,
|
||||
0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff,
|
||||
0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43,
|
||||
0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94,
|
||||
0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99,
|
||||
0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1,
|
||||
0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46,
|
||||
0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77,
|
||||
0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26,
|
||||
0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4,
|
||||
0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1,
|
||||
0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58,
|
||||
0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c,
|
||||
0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7,
|
||||
0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88,
|
||||
0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84,
|
||||
0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51,
|
||||
0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a,
|
||||
0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7,
|
||||
0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa,
|
||||
0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80,
|
||||
0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61,
|
||||
0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d,
|
||||
0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f,
|
||||
0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca,
|
||||
0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68,
|
||||
0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2,
|
||||
0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c,
|
||||
0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13,
|
||||
0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5,
|
||||
0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25,
|
||||
0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37,
|
||||
0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e,
|
||||
0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59,
|
||||
0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d,
|
||||
0xf1, 0x44, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f,
|
||||
0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf,
|
||||
0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff,
|
||||
0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00,
|
||||
0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca,
|
||||
0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba,
|
||||
0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0,
|
||||
0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a,
|
||||
0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10,
|
||||
0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64,
|
||||
0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24,
|
||||
0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97,
|
||||
0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88,
|
||||
0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca,
|
||||
0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c,
|
||||
0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58,
|
||||
0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23,
|
||||
0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb,
|
||||
0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13,
|
||||
0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2,
|
||||
0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68,
|
||||
0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c,
|
||||
0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e,
|
||||
0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3,
|
||||
0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88,
|
||||
0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84,
|
||||
0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51,
|
||||
0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a,
|
||||
0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7,
|
||||
0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55,
|
||||
0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00,
|
||||
0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3,
|
||||
0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a,
|
||||
0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37,
|
||||
0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2,
|
||||
0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31,
|
||||
0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b,
|
||||
0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3,
|
||||
0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35,
|
||||
0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3,
|
||||
0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79,
|
||||
0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31,
|
||||
0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f,
|
||||
0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5,
|
||||
0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b,
|
||||
0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96,
|
||||
0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8,
|
||||
0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a,
|
||||
0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00,
|
||||
0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
|
||||
|
|
@ -387,6 +388,13 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.AppProtocol != nil {
|
||||
i -= len(*m.AppProtocol)
|
||||
copy(dAtA[i:], *m.AppProtocol)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
if m.Port != nil {
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
|
||||
i--
|
||||
|
|
@@ -429,13 +437,11 @@ func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.AddressType != nil {
i -= len(*m.AddressType)
copy(dAtA[i:], *m.AddressType)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType)))
i--
dAtA[i] = 0x22
}
i -= len(m.AddressType)
copy(dAtA[i:], m.AddressType)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
i--
dAtA[i] = 0x22
if len(m.Ports) > 0 {
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -597,6 +603,10 @@ func (m *EndpointPort) Size() (n int) {
if m.Port != nil {
n += 1 + sovGenerated(uint64(*m.Port))
}
if m.AppProtocol != nil {
l = len(*m.AppProtocol)
n += 1 + l + sovGenerated(uint64(l))
}
return n
}

@@ -620,10 +630,8 @@ func (m *EndpointSlice) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
if m.AddressType != nil {
l = len(*m.AddressType)
n += 1 + l + sovGenerated(uint64(l))
}
l = len(m.AddressType)
n += 1 + l + sovGenerated(uint64(l))
return n
}

@@ -692,6 +700,7 @@ func (this *EndpointPort) String() string {
`Name:` + valueToStringGenerated(this.Name) + `,`,
`Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
`Port:` + valueToStringGenerated(this.Port) + `,`,
`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
`}`,
}, "")
return s
@@ -714,7 +723,7 @@ func (this *EndpointSlice) String() string {
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Endpoints:` + repeatedStringForEndpoints + `,`,
`Ports:` + repeatedStringForPorts + `,`,
`AddressType:` + valueToStringGenerated(this.AddressType) + `,`,
`AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
`}`,
}, "")
return s
@@ -1246,6 +1255,39 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
}
}
m.Port = &v
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.AppProtocol = &s
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
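The Unmarshal hunk above adds a `case 4` branch that decodes the new appProtocol field: it reads the field key, then a base-128 varint length, then that many bytes as the string value. The following standalone sketch is not the generated code itself, just an illustration of the same wire-format handling; the helper names are made up for the example.

// Sketch of varint + length-prefixed string decoding (protobuf wire type 2),
// mirroring what the generated Unmarshal code above does for appProtocol.
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf base-128 varint starting at offset i.
func decodeVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if i >= len(b) {
			return 0, 0, errors.New("unexpected end of input")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return v, i, nil
}

// decodeString reads a length-prefixed string (the payload of wire type 2).
func decodeString(b []byte, i int) (string, int, error) {
	n, i, err := decodeVarint(b, i)
	if err != nil {
		return "", 0, err
	}
	end := i + int(n)
	if end > len(b) {
		return "", 0, errors.New("declared length exceeds buffer")
	}
	return string(b[i:end]), end, nil
}

func main() {
	// 0x22 is the key for field 4, wire type 2 -- the appProtocol field above.
	buf := []byte{0x22, 0x04, 'h', 't', 't', 'p'}
	key, i, _ := decodeVarint(buf, 0)
	s, _, _ := decodeString(buf, i)
	fmt.Printf("field=%d wireType=%d value=%q\n", key>>3, key&0x7, s)
}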
@@ -1430,8 +1472,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := AddressType(dAtA[iNdEx:postIndex])
m.AddressType = &s
m.AddressType = AddressType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -32,11 +32,10 @@ option go_package = "v1alpha1";
// Endpoint represents a single logical "backend" implementing a service.
message Endpoint {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. This
// allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers
// (e.g. kube-proxy) must handle different types of addresses in the context
// of their own capabilities. This must contain at least one address but no
// more than 100.
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
repeated string addresses = 1;

@@ -87,11 +86,10 @@ message EndpointPort {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is dervied from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass IANA_SVC_NAME validation:
// * must be no more than 15 characters long
// * may contain only [-a-z0-9]
// * must contain at least one letter [a-z]
// * it must not start or end with a hyphen, nor contain adjacent hyphens
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
optional string name = 1;

@@ -104,6 +102,14 @@ message EndpointPort {
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
optional int32 port = 3;

// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
optional string appProtocol = 4;
}

// EndpointSlice represents a subset of the endpoints that implement a service.
@@ -115,9 +121,12 @@ message EndpointSlice {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type.
// Default is IP
// +optional
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
optional string addressType = 4;

// endpoints is a list of unique endpoints in this slice. Each slice may
@@ -33,10 +33,13 @@ type EndpointSlice struct {
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type.
// Default is IP
// +optional
AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// endpoints is a list of unique endpoints in this slice. Each slice may
// include a maximum of 1000 endpoints.
// +listType=atomic
@@ -56,17 +59,26 @@ type AddressType string

const (
// AddressTypeIP represents an IP Address.
// This address type has been deprecated and has been replaced by the IPv4
// and IPv6 adddress types. New resources with this address type will be
// considered invalid. This will be fully removed in 1.18.
// +deprecated
AddressTypeIP = AddressType("IP")
// AddressTypeIPv4 represents an IPv4 Address.
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
// AddressTypeIPv6 represents an IPv6 Address.
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
// AddressTypeFQDN represents a FQDN.
AddressTypeFQDN = AddressType("FQDN")
)

// Endpoint represents a single logical "backend" implementing a service.
type Endpoint struct {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. This
// allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers
// (e.g. kube-proxy) must handle different types of addresses in the context
// of their own capabilities. This must contain at least one address but no
// more than 100.
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
// conditions contains information about the current status of the endpoint.
@@ -113,11 +125,10 @@ type EndpointPort struct {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is dervied from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass IANA_SVC_NAME validation:
// * must be no more than 15 characters long
// * may contain only [-a-z0-9]
// * must contain at least one letter [a-z]
// * it must not start or end with a hyphen, nor contain adjacent hyphens
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
// The IP protocol for this port.
@@ -128,6 +139,13 @@ type EndpointPort struct {
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -29,7 +29,7 @@ package v1alpha1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Endpoint = map[string]string{
"": "Endpoint represents a single logical \"backend\" implementing a service.",
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
"conditions": "conditions contains information about the current status of the endpoint.",
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
@@ -50,10 +50,11 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
}

var map_EndpointPort = map[string]string{
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
}

func (EndpointPort) SwaggerDoc() map[string]string {
@@ -63,7 +64,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
var map_EndpointSlice = map[string]string{
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
"metadata": "Standard object's metadata.",
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP",
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
}
@@ -19,4 +19,10 @@ package v1alpha1
const (
// LabelServiceName is used to indicate the name of a Kubernetes service.
LabelServiceName = "kubernetes.io/service-name"
// LabelManagedBy is used to indicate the controller or entity that manages
// an EndpointSlice. This label aims to enable different EndpointSlice
// objects to be managed by different controllers or entities within the
// same cluster. It is highly recommended to configure this label for all
// EndpointSlices.
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
)
@@ -103,6 +103,11 @@ func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
*out = new(int32)
**out = **in
}
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
return
}

@@ -121,11 +126,6 @@ func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.AddressType != nil {
in, out := &in.AddressType, &out.AddressType
*out = new(AddressType)
**out = **in
}
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]Endpoint, len(*in))
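The discovery/v1alpha1 changes above boil down to two API-visible differences: EndpointSlice.AddressType is now a plain value instead of a pointer, and EndpointPort gains an optional AppProtocol *string field. A minimal sketch of what client code constructing such an object looks like after this bump, assuming the vendored k8s.io/api v0.17.6 and k8s.io/apimachinery v0.17.6 packages are on the module path (the object names are made up for the example):

// Sketch: building a discovery/v1alpha1 EndpointSlice with the 0.17.6 types.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	name := "https"
	proto := corev1.ProtocolTCP
	port := int32(8443)
	appProto := "https" // the new appProtocol field (protobuf field 4)

	slice := discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "my-svc-abc12",
			Labels: map[string]string{discovery.LabelServiceName: "my-svc"},
		},
		// No longer a *AddressType pointer after this bump.
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{
			{Addresses: []string{"10.0.0.1"}},
		},
		Ports: []discovery.EndpointPort{
			{Name: &name, Protocol: &proto, Port: &port, AppProtocol: &appProto},
		},
	}
	fmt.Println(slice.AddressType, *slice.Ports[0].AppProtocol)
}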
@@ -0,0 +1,22 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +groupName=discovery.k8s.io

package v1beta1 // import "k8s.io/api/discovery/v1beta1"
File diff suppressed because it is too large
@ -0,0 +1,157 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.api.discovery.v1beta1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1beta1";
|
||||
|
||||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
message Endpoint {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
repeated string addresses = 1;
|
||||
|
||||
// conditions contains information about the current status of the endpoint.
|
||||
optional EndpointConditions conditions = 2;
|
||||
|
||||
// hostname of this endpoint. This field may be used by consumers of
|
||||
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
|
||||
// Multiple endpoints which use the same hostname should be considered
|
||||
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
|
||||
// validation.
|
||||
// +optional
|
||||
optional string hostname = 3;
|
||||
|
||||
// targetRef is a reference to a Kubernetes object that represents this
|
||||
// endpoint.
|
||||
// +optional
|
||||
optional k8s.io.api.core.v1.ObjectReference targetRef = 4;
|
||||
|
||||
// topology contains arbitrary topology information associated with the
|
||||
// endpoint. These key/value pairs must conform with the label format.
|
||||
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
|
||||
// Topology may include a maximum of 16 key/value pairs. This includes, but
|
||||
// is not limited to the following well known keys:
|
||||
// * kubernetes.io/hostname: the value indicates the hostname of the node
|
||||
// where the endpoint is located. This should match the corresponding
|
||||
// node label.
|
||||
// * topology.kubernetes.io/zone: the value indicates the zone where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// * topology.kubernetes.io/region: the value indicates the region where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// +optional
|
||||
map<string, string> topology = 5;
|
||||
}
|
||||
|
||||
// EndpointConditions represents the current condition of an endpoint.
|
||||
message EndpointConditions {
|
||||
// ready indicates that this endpoint is prepared to receive traffic,
|
||||
// according to whatever system is managing the endpoint. A nil value
|
||||
// indicates an unknown state. In most cases consumers should interpret this
|
||||
// unknown state as ready.
|
||||
// +optional
|
||||
optional bool ready = 1;
|
||||
}
|
||||
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
message EndpointPort {
|
||||
// The name of this port. All ports in an EndpointSlice must have a unique
|
||||
// name. If the EndpointSlice is dervied from a Kubernetes service, this
|
||||
// corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
// * must start and end with an alphanumeric character.
|
||||
// Default is empty string.
|
||||
optional string name = 1;
|
||||
|
||||
// The IP protocol for this port.
|
||||
// Must be UDP, TCP, or SCTP.
|
||||
// Default is TCP.
|
||||
optional string protocol = 2;
|
||||
|
||||
// The port number of the endpoint.
|
||||
// If this is not specified, ports are not restricted and must be
|
||||
// interpreted in the context of the specific consumer.
|
||||
optional int32 port = 3;
|
||||
|
||||
// The application protocol for this port.
|
||||
// This field follows standard Kubernetes label syntax.
|
||||
// Un-prefixed names are reserved for IANA standard service names (as per
|
||||
// RFC-6335 and http://www.iana.org/assignments/service-names).
|
||||
// Non-standard protocols should use prefixed names.
|
||||
// Default is empty string.
|
||||
optional string appProtocol = 4;
|
||||
}
|
||||
|
||||
// EndpointSlice represents a subset of the endpoints that implement a service.
|
||||
// For a given service there may be multiple EndpointSlice objects, selected by
|
||||
// labels, which must be joined to produce the full set of endpoints.
|
||||
message EndpointSlice {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
optional string addressType = 4;
|
||||
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
// include a maximum of 1000 endpoints.
|
||||
// +listType=atomic
|
||||
repeated Endpoint endpoints = 2;
|
||||
|
||||
// ports specifies the list of network ports exposed by each endpoint in
|
||||
// this slice. Each port must have a unique name. When ports is empty, it
|
||||
// indicates that there are no defined ports. When a port is defined with a
|
||||
// nil port value, it indicates "all ports". Each slice may include a
|
||||
// maximum of 100 ports.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
repeated EndpointPort ports = 3;
|
||||
}
|
||||
|
||||
// EndpointSliceList represents a list of endpoint slices
|
||||
message EndpointSliceList {
|
||||
// Standard list metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of endpoint slices
|
||||
// +listType=set
|
||||
repeated EndpointSlice items = 2;
|
||||
}
|
||||
|
||||
|
|
@@ -0,0 +1,56 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used in this package
const GroupName = "discovery.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&EndpointSlice{},
&EndpointSliceList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
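The register.go file above follows the usual Kubernetes API-package pattern: a SchemeBuilder plus an exported AddToScheme. A small usage sketch, assuming the vendored k8s.io/api v0.17.6 and k8s.io/apimachinery v0.17.6, of wiring the new discovery/v1beta1 kinds into a runtime.Scheme:

// Sketch: registering discovery/v1beta1 types and resolving a GroupKind.
package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := discoveryv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Kind() qualifies an unqualified kind with the discovery.k8s.io group.
	gk := discoveryv1beta1.Kind("EndpointSlice")
	gvk := discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointSlice")
	fmt.Println(gk.String(), scheme.Recognizes(gvk))
}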
@ -0,0 +1,162 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// EndpointSlice represents a subset of the endpoints that implement a service.
|
||||
// For a given service there may be multiple EndpointSlice objects, selected by
|
||||
// labels, which must be joined to produce the full set of endpoints.
|
||||
type EndpointSlice struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
// include a maximum of 1000 endpoints.
|
||||
// +listType=atomic
|
||||
Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
|
||||
// ports specifies the list of network ports exposed by each endpoint in
|
||||
// this slice. Each port must have a unique name. When ports is empty, it
|
||||
// indicates that there are no defined ports. When a port is defined with a
|
||||
// nil port value, it indicates "all ports". Each slice may include a
|
||||
// maximum of 100 ports.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
|
||||
}
|
||||
|
||||
// AddressType represents the type of address referred to by an endpoint.
|
||||
type AddressType string
|
||||
|
||||
const (
|
||||
// AddressTypeIP represents an IP Address.
|
||||
// This address type has been deprecated and has been replaced by the IPv4
|
||||
// and IPv6 adddress types. New resources with this address type will be
|
||||
// considered invalid. This will be fully removed in 1.18.
|
||||
// +deprecated
|
||||
AddressTypeIP = AddressType("IP")
|
||||
// AddressTypeIPv4 represents an IPv4 Address.
|
||||
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
|
||||
// AddressTypeIPv6 represents an IPv6 Address.
|
||||
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
|
||||
// AddressTypeFQDN represents a FQDN.
|
||||
AddressTypeFQDN = AddressType("FQDN")
|
||||
)
|
||||
|
||||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
type Endpoint struct {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
|
||||
// conditions contains information about the current status of the endpoint.
|
||||
Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
|
||||
// hostname of this endpoint. This field may be used by consumers of
|
||||
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
|
||||
// Multiple endpoints which use the same hostname should be considered
|
||||
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
|
||||
// validation.
|
||||
// +optional
|
||||
Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
|
||||
// targetRef is a reference to a Kubernetes object that represents this
|
||||
// endpoint.
|
||||
// +optional
|
||||
TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"`
|
||||
// topology contains arbitrary topology information associated with the
|
||||
// endpoint. These key/value pairs must conform with the label format.
|
||||
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
|
||||
// Topology may include a maximum of 16 key/value pairs. This includes, but
|
||||
// is not limited to the following well known keys:
|
||||
// * kubernetes.io/hostname: the value indicates the hostname of the node
|
||||
// where the endpoint is located. This should match the corresponding
|
||||
// node label.
|
||||
// * topology.kubernetes.io/zone: the value indicates the zone where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// * topology.kubernetes.io/region: the value indicates the region where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// +optional
|
||||
Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"`
|
||||
}
|
||||
|
||||
// EndpointConditions represents the current condition of an endpoint.
|
||||
type EndpointConditions struct {
|
||||
// ready indicates that this endpoint is prepared to receive traffic,
|
||||
// according to whatever system is managing the endpoint. A nil value
|
||||
// indicates an unknown state. In most cases consumers should interpret this
|
||||
// unknown state as ready.
|
||||
// +optional
|
||||
Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
|
||||
}
|
||||
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
type EndpointPort struct {
|
||||
// The name of this port. All ports in an EndpointSlice must have a unique
|
||||
// name. If the EndpointSlice is dervied from a Kubernetes service, this
|
||||
// corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
// * must start and end with an alphanumeric character.
|
||||
// Default is empty string.
|
||||
Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
|
||||
// The IP protocol for this port.
|
||||
// Must be UDP, TCP, or SCTP.
|
||||
// Default is TCP.
|
||||
Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
|
||||
// The port number of the endpoint.
|
||||
// If this is not specified, ports are not restricted and must be
|
||||
// interpreted in the context of the specific consumer.
|
||||
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
|
||||
// The application protocol for this port.
|
||||
// This field follows standard Kubernetes label syntax.
|
||||
// Un-prefixed names are reserved for IANA standard service names (as per
|
||||
// RFC-6335 and http://www.iana.org/assignments/service-names).
|
||||
// Non-standard protocols should use prefixed names.
|
||||
// Default is empty string.
|
||||
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// EndpointSliceList represents a list of endpoint slices
|
||||
type EndpointSliceList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of endpoint slices
|
||||
// +listType=set
|
||||
Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
86 vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go generated vendored Normal file
@ -0,0 +1,86 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
// This file contains a collection of methods that can be used from go-restful to
|
||||
// generate Swagger API documentation for its models. Please read this PR for more
|
||||
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
|
||||
//
|
||||
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
|
||||
// they are on one line! For multiple line or blocks that you want to ignore use ---.
|
||||
// Any context after a --- is ignored.
|
||||
//
|
||||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_Endpoint = map[string]string{
|
||||
"": "Endpoint represents a single logical \"backend\" implementing a service.",
|
||||
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
|
||||
"conditions": "conditions contains information about the current status of the endpoint.",
|
||||
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
|
||||
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
|
||||
"topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.",
|
||||
}
|
||||
|
||||
func (Endpoint) SwaggerDoc() map[string]string {
|
||||
return map_Endpoint
|
||||
}
|
||||
|
||||
var map_EndpointConditions = map[string]string{
|
||||
"": "EndpointConditions represents the current condition of an endpoint.",
|
||||
"ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.",
|
||||
}
|
||||
|
||||
func (EndpointConditions) SwaggerDoc() map[string]string {
|
||||
return map_EndpointConditions
|
||||
}
|
||||
|
||||
var map_EndpointPort = map[string]string{
|
||||
"": "EndpointPort represents a Port used by an EndpointSlice",
|
||||
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
|
||||
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
|
||||
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
|
||||
}
|
||||
|
||||
func (EndpointPort) SwaggerDoc() map[string]string {
|
||||
return map_EndpointPort
|
||||
}
|
||||
|
||||
var map_EndpointSlice = map[string]string{
|
||||
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
|
||||
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
|
||||
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
|
||||
}
|
||||
|
||||
func (EndpointSlice) SwaggerDoc() map[string]string {
|
||||
return map_EndpointSlice
|
||||
}
|
||||
|
||||
var map_EndpointSliceList = map[string]string{
|
||||
"": "EndpointSliceList represents a list of endpoint slices",
|
||||
"metadata": "Standard list metadata.",
|
||||
"items": "List of endpoint slices",
|
||||
}
|
||||
|
||||
func (EndpointSliceList) SwaggerDoc() map[string]string {
|
||||
return map_EndpointSliceList
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
|
@@ -0,0 +1,28 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

const (
// LabelServiceName is used to indicate the name of a Kubernetes service.
LabelServiceName = "kubernetes.io/service-name"
// LabelManagedBy is used to indicate the controller or entity that manages
// an EndpointSlice. This label aims to enable different EndpointSlice
// objects to be managed by different controllers or entities within the
// same cluster. It is highly recommended to configure this label for all
// EndpointSlices.
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
)
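These two well-known labels are how EndpointSlice consumers find the owning Service and the managing controller. A short sketch of attaching them when building a v1beta1 EndpointSlice, assuming the vendored k8s.io/api v0.17.6; the object name and the managed-by value are example values only:

// Sketch: labeling a discovery/v1beta1 EndpointSlice with the well-known keys.
package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	slice := discoveryv1beta1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-svc-abc12",
			Labels: map[string]string{
				discoveryv1beta1.LabelServiceName: "my-svc",              // owning Service
				discoveryv1beta1.LabelManagedBy:   "example-controller", // example managing entity
			},
		},
		AddressType: discoveryv1beta1.AddressTypeIPv4,
	}
	fmt.Println(slice.Labels[discoveryv1beta1.LabelServiceName])
}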
@ -0,0 +1,195 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
|
||||
*out = *in
|
||||
if in.Addresses != nil {
|
||||
in, out := &in.Addresses, &out.Addresses
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Conditions.DeepCopyInto(&out.Conditions)
|
||||
if in.Hostname != nil {
|
||||
in, out := &in.Hostname, &out.Hostname
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.TargetRef != nil {
|
||||
in, out := &in.TargetRef, &out.TargetRef
|
||||
*out = new(v1.ObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.Topology != nil {
|
||||
in, out := &in.Topology, &out.Topology
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
|
||||
func (in *Endpoint) DeepCopy() *Endpoint {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Endpoint)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
|
||||
*out = *in
|
||||
if in.Ready != nil {
|
||||
in, out := &in.Ready, &out.Ready
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
|
||||
func (in *EndpointConditions) DeepCopy() *EndpointConditions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointConditions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
|
||||
*out = *in
|
||||
if in.Name != nil {
|
||||
in, out := &in.Name, &out.Name
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Protocol != nil {
|
||||
in, out := &in.Protocol, &out.Protocol
|
||||
*out = new(v1.Protocol)
|
||||
**out = **in
|
||||
}
|
||||
if in.Port != nil {
|
||||
in, out := &in.Port, &out.Port
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.AppProtocol != nil {
|
||||
in, out := &in.AppProtocol, &out.AppProtocol
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
|
||||
func (in *EndpointPort) DeepCopy() *EndpointPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
if in.Endpoints != nil {
|
||||
in, out := &in.Endpoints, &out.Endpoints
|
||||
*out = make([]Endpoint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]EndpointPort, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
|
||||
func (in *EndpointSlice) DeepCopy() *EndpointSlice {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointSlice)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EndpointSlice) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]EndpointSlice, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
|
||||
func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointSliceList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@@ -985,7 +985,7 @@ type AllowedHostPath struct {
// Deprecated: use FSType from policy API Group instead.
type FSType string

var (
const (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"
@@ -0,0 +1,24 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true

// +groupName=flowcontrol.apiserver.k8s.io

// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io".
package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1"
File diff suppressed because it is too large
@ -0,0 +1,436 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.api.flowcontrol.v1alpha1;

import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "v1alpha1";

// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
  // `type` is the type of flow distinguisher method
  // The supported types are "ByUser" and "ByNamespace".
  // Required.
  optional string type = 1;
}

// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
message FlowSchema {
  // `metadata` is the standard object's metadata.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // `spec` is the specification of the desired behavior of a FlowSchema.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
  // +optional
  optional FlowSchemaSpec spec = 2;

  // `status` is the current status of a FlowSchema.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
  // +optional
  optional FlowSchemaStatus status = 3;
}

// FlowSchemaCondition describes conditions for a FlowSchema.
message FlowSchemaCondition {
  // `type` is the type of the condition.
  // Required.
  optional string type = 1;

  // `status` is the status of the condition.
  // Can be True, False, Unknown.
  // Required.
  optional string status = 2;

  // `lastTransitionTime` is the last time the condition transitioned from one status to another.
  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

  // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
  optional string reason = 4;

  // `message` is a human-readable message indicating details about last transition.
  optional string message = 5;
}

// FlowSchemaList is a list of FlowSchema objects.
message FlowSchemaList {
  // `metadata` is the standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // `items` is a list of FlowSchemas.
  // +listType=set
  repeated FlowSchema items = 2;
}

// FlowSchemaSpec describes how the FlowSchema's specification looks like.
message FlowSchemaSpec {
  // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
  // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
  // Required.
  optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1;

  // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
  // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
  // MatchingPrecedence. Each MatchingPrecedence value must be non-negative.
  // Note that if the precedence is not specified or zero, it will be set to 1000 as default.
  // +optional
  optional int32 matchingPrecedence = 2;

  // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
  // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
  // +optional
  optional FlowDistinguisherMethod distinguisherMethod = 3;

  // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
  // at least one member of rules matches the request.
  // if it is an empty slice, there will be no requests matching the FlowSchema.
  // +listType=set
  // +optional
  repeated PolicyRulesWithSubjects rules = 4;
}

// FlowSchemaStatus represents the current state of a FlowSchema.
message FlowSchemaStatus {
  // `conditions` is a list of the current states of FlowSchema.
  // +listType=map
  // +listMapKey=type
  // +optional
  repeated FlowSchemaCondition conditions = 1;
}

// GroupSubject holds detailed information for group-kind subject.
message GroupSubject {
  // name is the user group that matches, or "*" to match all user groups.
  // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
  // well-known group names.
  // Required.
  optional string name = 1;
}

// LimitResponse defines how to handle requests that can not be executed right now.
// +union
message LimitResponse {
  // `type` is "Queue" or "Reject".
  // "Queue" means that requests that can not be executed upon arrival
  // are held in a queue until they can be executed or a queuing limit
  // is reached.
  // "Reject" means that requests that can not be executed upon arrival
  // are rejected.
  // Required.
  // +unionDiscriminator
  optional string type = 1;

  // `queuing` holds the configuration parameters for queuing.
  // This field may be non-empty only if `type` is `"Queue"`.
  // +optional
  optional QueuingConfiguration queuing = 2;
}

// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
// It addresses two issues:
// * How are requests for this priority level limited?
// * What should be done with requests that exceed the limit?
message LimitedPriorityLevelConfiguration {
  // `assuredConcurrencyShares` (ACS) configures the execution
  // limit, which is a limit on the number of requests of this
  // priority level that may be exeucting at a given time. ACS must
  // be a positive number. The server's concurrency limit (SCL) is
  // divided among the concurrency-controlled priority levels in
  // proportion to their assured concurrency shares. This produces
  // the assured concurrency value (ACV) --- the number of requests
  // that may be executing at a time --- for each such priority
  // level:
  //
  // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
  //
  // bigger numbers of ACS mean more reserved concurrent requests (at the
  // expense of every other PL).
  // This field has a default value of 30.
  // +optional
  optional int32 assuredConcurrencyShares = 1;

  // `limitResponse` indicates what to do with requests that can not be executed right now
  optional LimitResponse limitResponse = 2;
}

// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
message NonResourcePolicyRule {
  // `verbs` is a list of matching verbs and may not be empty.
  // "*" matches all verbs. If it is present, it must be the only entry.
  // +listType=set
  // Required.
  repeated string verbs = 1;

  // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
  // For example:
  //   - "/healthz" is legal
  //   - "/hea*" is illegal
  //   - "/hea" is legal but matches nothing
  //   - "/hea/*" also matches nothing
  //   - "/healthz/*" matches all per-component health checks.
  // "*" matches all non-resource urls. if it is present, it must be the only entry.
  // +listType=set
  // Required.
  repeated string nonResourceURLs = 6;
}

// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
// of resourceRules or nonResourceRules matches the request.
message PolicyRulesWithSubjects {
  // subjects is the list of normal user, serviceaccount, or group that this rule cares about.
  // There must be at least one member in this slice.
  // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
  // +listType=set
  // Required.
  repeated Subject subjects = 1;

  // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
  // target resource.
  // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
  // +listType=set
  // +optional
  repeated ResourcePolicyRule resourceRules = 2;

  // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
  // and the target non-resource URL.
  // +listType=set
  // +optional
  repeated NonResourcePolicyRule nonResourceRules = 3;
}

// PriorityLevelConfiguration represents the configuration of a priority level.
message PriorityLevelConfiguration {
  // `metadata` is the standard object's metadata.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // `spec` is the specification of the desired behavior of a "request-priority".
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
  // +optional
  optional PriorityLevelConfigurationSpec spec = 2;

  // `status` is the current status of a "request-priority".
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
  // +optional
  optional PriorityLevelConfigurationStatus status = 3;
}

// PriorityLevelConfigurationCondition defines the condition of priority level.
message PriorityLevelConfigurationCondition {
  // `type` is the type of the condition.
  // Required.
  optional string type = 1;

  // `status` is the status of the condition.
  // Can be True, False, Unknown.
  // Required.
  optional string status = 2;

  // `lastTransitionTime` is the last time the condition transitioned from one status to another.
  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

  // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
  optional string reason = 4;

  // `message` is a human-readable message indicating details about last transition.
  optional string message = 5;
}

// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
message PriorityLevelConfigurationList {
  // `metadata` is the standard object's metadata.
  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // `items` is a list of request-priorities.
  // +listType=set
  repeated PriorityLevelConfiguration items = 2;
}

// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
message PriorityLevelConfigurationReference {
  // `name` is the name of the priority level configuration being referenced
  // Required.
  optional string name = 1;
}

// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
// +union
message PriorityLevelConfigurationSpec {
  // `type` indicates whether this priority level is subject to
  // limitation on request execution. A value of `"Exempt"` means
  // that requests of this priority level are not subject to a limit
  // (and thus are never queued) and do not detract from the
  // capacity made available to other priority levels. A value of
  // `"Limited"` means that (a) requests of this priority level
  // _are_ subject to limits and (b) some of the server's limited
  // capacity is made available exclusively to this priority level.
  // Required.
  // +unionDiscriminator
  optional string type = 1;

  // `limited` specifies how requests are handled for a Limited priority level.
  // This field must be non-empty if and only if `type` is `"Limited"`.
  // +optional
  optional LimitedPriorityLevelConfiguration limited = 2;
}

// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
message PriorityLevelConfigurationStatus {
  // `conditions` is the current state of "request-priority".
  // +listType=map
  // +listMapKey=type
  // +optional
  repeated PriorityLevelConfigurationCondition conditions = 1;
}

// QueuingConfiguration holds the configuration parameters for queuing
message QueuingConfiguration {
  // `queues` is the number of queues for this priority level. The
  // queues exist independently at each apiserver. The value must be
  // positive. Setting it to 1 effectively precludes
  // shufflesharding and thus makes the distinguisher method of
  // associated flow schemas irrelevant. This field has a default
  // value of 64.
  // +optional
  optional int32 queues = 1;

  // `handSize` is a small positive number that configures the
  // shuffle sharding of requests into queues. When enqueuing a request
  // at this priority level the request's flow identifier (a string
  // pair) is hashed and the hash value is used to shuffle the list
  // of queues and deal a hand of the size specified here. The
  // request is put into one of the shortest queues in that hand.
  // `handSize` must be no larger than `queues`, and should be
  // significantly smaller (so that a few heavy flows do not
  // saturate most of the queues). See the user-facing
  // documentation for more extensive guidance on setting this
  // field. This field has a default value of 8.
  // +optional
  optional int32 handSize = 2;

  // `queueLengthLimit` is the maximum number of requests allowed to
  // be waiting in a given queue of this priority level at a time;
  // excess requests are rejected. This value must be positive. If
  // not specified, it will be defaulted to 50.
  // +optional
  optional int32 queueLengthLimit = 3;
}

// ResourcePolicyRule is a predicate that matches some resource
// requests, testing the request's verb and the target resource. A
// ResourcePolicyRule matches a resource request if and only if: (a)
// at least one member of verbs matches the request, (b) at least one
// member of apiGroups matches the request, (c) at least one member of
// resources matches the request, and (d) least one member of
// namespaces matches the request.
message ResourcePolicyRule {
  // `verbs` is a list of matching verbs and may not be empty.
  // "*" matches all verbs and, if present, must be the only entry.
  // +listType=set
  // Required.
  repeated string verbs = 1;

  // `apiGroups` is a list of matching API groups and may not be empty.
  // "*" matches all API groups and, if present, must be the only entry.
  // +listType=set
  // Required.
  repeated string apiGroups = 2;

  // `resources` is a list of matching resources (i.e., lowercase
  // and plural) with, if desired, subresource. For example, [
  // "services", "nodes/status" ]. This list may not be empty.
  // "*" matches all resources and, if present, must be the only entry.
  // Required.
  // +listType=set
  repeated string resources = 3;

  // `clusterScope` indicates whether to match requests that do not
  // specify a namespace (which happens either because the resource
  // is not namespaced or the request targets all namespaces).
  // If this field is omitted or false then the `namespaces` field
  // must contain a non-empty list.
  // +optional
  optional bool clusterScope = 4;

  // `namespaces` is a list of target namespaces that restricts
  // matches. A request that specifies a target namespace matches
  // only if either (a) this list contains that target namespace or
  // (b) this list contains "*". Note that "*" matches any
  // specified namespace but does not match a request that _does
  // not specify_ a namespace (see the `clusterScope` field for
  // that).
  // This list may be empty, but only if `clusterScope` is true.
  // +optional
  // +listType=set
  repeated string namespaces = 5;
}

// ServiceAccountSubject holds detailed information for service-account-kind subject.
message ServiceAccountSubject {
  // `namespace` is the namespace of matching ServiceAccount objects.
  // Required.
  optional string namespace = 1;

  // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
  // Required.
  optional string name = 2;
}

// Subject matches the originator of a request, as identified by the request authentication system. There are three
// ways of matching an originator; by user, group, or service account.
// +union
message Subject {
  // Required
  // +unionDiscriminator
  optional string kind = 1;

  // +optional
  optional UserSubject user = 2;

  // +optional
  optional GroupSubject group = 3;

  // +optional
  optional ServiceAccountSubject serviceAccount = 4;
}

// UserSubject holds detailed information for user-kind subject.
message UserSubject {
  // `name` is the username that matches, or "*" to match all usernames.
  // Required.
  optional string name = 1;
}
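The assured-concurrency formula documented on LimitedPriorityLevelConfiguration above is easy to sanity-check with concrete numbers. The following standalone Go sketch is not part of the vendored file; the function name and the share values are invented purely for illustration, assuming a hypothetical server concurrency limit of 600 and three priority levels with shares 30, 40 and 30.

package main

import (
	"fmt"
	"math"
)

// assuredConcurrencyValue applies ACV(l) = ceil( SCL * ACS(l) / sum(ACS) )
// as described in the comment on assuredConcurrencyShares.
func assuredConcurrencyValue(scl, acs, sumACS int) int {
	return int(math.Ceil(float64(scl) * float64(acs) / float64(sumACS)))
}

func main() {
	// With SCL=600 and shares 30, 40, 30 (sum 100), the levels are assured
	// 180, 240 and 180 concurrent requests respectively.
	for _, acs := range []int{30, 40, 30} {
		fmt.Println(assuredConcurrencyValue(600, acs, 100))
	}
}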
@ -0,0 +1,58 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the name of api group
const GroupName = "flowcontrol.apiserver.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder installs the api group to a scheme
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme adds api to a scheme
	AddToScheme = SchemeBuilder.AddToScheme
)

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&FlowSchema{},
		&FlowSchemaList{},
		&PriorityLevelConfiguration{},
		&PriorityLevelConfigurationList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
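For context on how the SchemeBuilder above is consumed, here is a hedged sketch (not part of this diff) that registers the flowcontrol/v1alpha1 group into a fresh runtime.Scheme, the way client code typically wires in a vendored API package.

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, installing FlowSchema, FlowSchemaList,
	// PriorityLevelConfiguration and PriorityLevelConfigurationList under
	// flowcontrol.apiserver.k8s.io/v1alpha1.
	if err := flowcontrolv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := flowcontrolv1alpha1.SchemeGroupVersion.WithKind("FlowSchema")
	fmt.Println(scheme.Recognizes(gvk)) // true once registration succeeded
}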
@ -0,0 +1,513 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// These are valid wildcards.
const (
	APIGroupAll    = "*"
	ResourceAll    = "*"
	VerbAll        = "*"
	NonResourceAll = "*"
	NameAll        = "*"

	NamespaceEvery = "*" // matches every particular namespace
)

// System preset priority level names
const (
	PriorityLevelConfigurationNameExempt = "exempt"
)

// Conditions
const (
	FlowSchemaConditionDangling = "Dangling"

	PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchema struct {
	metav1.TypeMeta `json:",inline"`
	// `metadata` is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// `spec` is the specification of the desired behavior of a FlowSchema.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// `status` is the current status of a FlowSchema.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// FlowSchemaList is a list of FlowSchema objects.
type FlowSchemaList struct {
	metav1.TypeMeta `json:",inline"`
	// `metadata` is the standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// `items` is a list of FlowSchemas.
	// +listType=set
	Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// FlowSchemaSpec describes how the FlowSchema's specification looks like.
type FlowSchemaSpec struct {
	// `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
	// be resolved, the FlowSchema will be ignored and marked as invalid in its status.
	// Required.
	PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"`
	// `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
	// FlowSchema is among those with the numerically lowest (which we take to be logically highest)
	// MatchingPrecedence. Each MatchingPrecedence value must be non-negative.
	// Note that if the precedence is not specified or zero, it will be set to 1000 as default.
	// +optional
	MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"`
	// `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
	// `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
	// +optional
	DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"`
	// `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
	// at least one member of rules matches the request.
	// if it is an empty slice, there will be no requests matching the FlowSchema.
	// +listType=set
	// +optional
	Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"`
}

// FlowDistinguisherMethodType is the type of flow distinguisher method
type FlowDistinguisherMethodType string

// These are valid flow-distinguisher methods.
const (
	// FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request.
	// This type is used to provide some insulation between users.
	FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser"

	// FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the
	// object that the request acts upon. If the object is not namespaced, or if the request is a non-resource
	// request, then the distinguisher will be the empty string. An example usage of this type is to provide
	// some insulation between tenants in a situation where there are multiple tenants and each namespace
	// is dedicated to a tenant.
	FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace"
)

// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethod struct {
	// `type` is the type of flow distinguisher method
	// The supported types are "ByUser" and "ByNamespace".
	// Required.
	Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"`
}

// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReference struct {
	// `name` is the name of the priority level configuration being referenced
	// Required.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}

// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjects struct {
	// subjects is the list of normal user, serviceaccount, or group that this rule cares about.
	// There must be at least one member in this slice.
	// A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
	// +listType=set
	// Required.
	Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"`
	// `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
	// target resource.
	// At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
	// +listType=set
	// +optional
	ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"`
	// `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
	// and the target non-resource URL.
	// +listType=set
	// +optional
	NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"`
}

// Subject matches the originator of a request, as identified by the request authentication system. There are three
// ways of matching an originator; by user, group, or service account.
// +union
type Subject struct {
	// Required
	// +unionDiscriminator
	Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"`
	// +optional
	User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
	// +optional
	Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
	// +optional
	ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"`
}

// SubjectKind is the kind of subject.
type SubjectKind string

// Supported subject's kinds.
const (
	SubjectKindUser           SubjectKind = "User"
	SubjectKindGroup          SubjectKind = "Group"
	SubjectKindServiceAccount SubjectKind = "ServiceAccount"
)

// UserSubject holds detailed information for user-kind subject.
type UserSubject struct {
	// `name` is the username that matches, or "*" to match all usernames.
	// Required.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}

// GroupSubject holds detailed information for group-kind subject.
type GroupSubject struct {
	// name is the user group that matches, or "*" to match all user groups.
	// See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
	// well-known group names.
	// Required.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}

// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubject struct {
	// `namespace` is the namespace of matching ServiceAccount objects.
	// Required.
	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
	// `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
	// Required.
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
}

// ResourcePolicyRule is a predicate that matches some resource
// requests, testing the request's verb and the target resource. A
// ResourcePolicyRule matches a resource request if and only if: (a)
// at least one member of verbs matches the request, (b) at least one
// member of apiGroups matches the request, (c) at least one member of
// resources matches the request, and (d) least one member of
// namespaces matches the request.
type ResourcePolicyRule struct {
	// `verbs` is a list of matching verbs and may not be empty.
	// "*" matches all verbs and, if present, must be the only entry.
	// +listType=set
	// Required.
	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`

	// `apiGroups` is a list of matching API groups and may not be empty.
	// "*" matches all API groups and, if present, must be the only entry.
	// +listType=set
	// Required.
	APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"`

	// `resources` is a list of matching resources (i.e., lowercase
	// and plural) with, if desired, subresource. For example, [
	// "services", "nodes/status" ]. This list may not be empty.
	// "*" matches all resources and, if present, must be the only entry.
	// Required.
	// +listType=set
	Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"`

	// `clusterScope` indicates whether to match requests that do not
	// specify a namespace (which happens either because the resource
	// is not namespaced or the request targets all namespaces).
	// If this field is omitted or false then the `namespaces` field
	// must contain a non-empty list.
	// +optional
	ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"`

	// `namespaces` is a list of target namespaces that restricts
	// matches. A request that specifies a target namespace matches
	// only if either (a) this list contains that target namespace or
	// (b) this list contains "*". Note that "*" matches any
	// specified namespace but does not match a request that _does
	// not specify_ a namespace (see the `clusterScope` field for
	// that).
	// This list may be empty, but only if `clusterScope` is true.
	// +optional
	// +listType=set
	Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"`
}

// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRule struct {
	// `verbs` is a list of matching verbs and may not be empty.
	// "*" matches all verbs. If it is present, it must be the only entry.
	// +listType=set
	// Required.
	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
	// `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
	// For example:
	//   - "/healthz" is legal
	//   - "/hea*" is illegal
	//   - "/hea" is legal but matches nothing
	//   - "/hea/*" also matches nothing
	//   - "/healthz/*" matches all per-component health checks.
	// "*" matches all non-resource urls. if it is present, it must be the only entry.
	// +listType=set
	// Required.
	NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"`
}

// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatus struct {
	// `conditions` is a list of the current states of FlowSchema.
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
}

// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaCondition struct {
	// `type` is the type of the condition.
	// Required.
	Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
	// `status` is the status of the condition.
	// Can be True, False, Unknown.
	// Required.
	Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
	// `lastTransitionTime` is the last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// `message` is a human-readable message indicating details about last transition.
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}

// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type
type FlowSchemaConditionType string

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfiguration struct {
	metav1.TypeMeta `json:",inline"`
	// `metadata` is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// `spec` is the specification of the desired behavior of a "request-priority".
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// `status` is the current status of a "request-priority".
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
type PriorityLevelConfigurationList struct {
	metav1.TypeMeta `json:",inline"`
	// `metadata` is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// `items` is a list of request-priorities.
	// +listType=set
	Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
// +union
type PriorityLevelConfigurationSpec struct {
	// `type` indicates whether this priority level is subject to
	// limitation on request execution. A value of `"Exempt"` means
	// that requests of this priority level are not subject to a limit
	// (and thus are never queued) and do not detract from the
	// capacity made available to other priority levels. A value of
	// `"Limited"` means that (a) requests of this priority level
	// _are_ subject to limits and (b) some of the server's limited
	// capacity is made available exclusively to this priority level.
	// Required.
	// +unionDiscriminator
	Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"`

	// `limited` specifies how requests are handled for a Limited priority level.
	// This field must be non-empty if and only if `type` is `"Limited"`.
	// +optional
	Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
}

// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
type PriorityLevelEnablement string

// Supported priority level enablement values.
const (
	// PriorityLevelEnablementExempt means that requests are not subject to limits
	PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt"

	// PriorityLevelEnablementLimited means that requests are subject to limits
	PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited"
)

// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
// It addresses two issues:
// * How are requests for this priority level limited?
// * What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfiguration struct {
	// `assuredConcurrencyShares` (ACS) configures the execution
	// limit, which is a limit on the number of requests of this
	// priority level that may be exeucting at a given time. ACS must
	// be a positive number. The server's concurrency limit (SCL) is
	// divided among the concurrency-controlled priority levels in
	// proportion to their assured concurrency shares. This produces
	// the assured concurrency value (ACV) --- the number of requests
	// that may be executing at a time --- for each such priority
	// level:
	//
	// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
	//
	// bigger numbers of ACS mean more reserved concurrent requests (at the
	// expense of every other PL).
	// This field has a default value of 30.
	// +optional
	AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"`

	// `limitResponse` indicates what to do with requests that can not be executed right now
	LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"`
}

// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
	// `type` is "Queue" or "Reject".
	// "Queue" means that requests that can not be executed upon arrival
	// are held in a queue until they can be executed or a queuing limit
	// is reached.
	// "Reject" means that requests that can not be executed upon arrival
	// are rejected.
	// Required.
	// +unionDiscriminator
	Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"`

	// `queuing` holds the configuration parameters for queuing.
	// This field may be non-empty only if `type` is `"Queue"`.
	// +optional
	Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"`
}

// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now
type LimitResponseType string

// Supported limit responses.
const (
	// LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit
	LimitResponseTypeQueue LimitResponseType = "Queue"

	// LimitResponseTypeReject means that requests that can not be executed right now are rejected
	LimitResponseTypeReject LimitResponseType = "Reject"
)

// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfiguration struct {
	// `queues` is the number of queues for this priority level. The
	// queues exist independently at each apiserver. The value must be
	// positive. Setting it to 1 effectively precludes
	// shufflesharding and thus makes the distinguisher method of
	// associated flow schemas irrelevant. This field has a default
	// value of 64.
	// +optional
	Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"`

	// `handSize` is a small positive number that configures the
	// shuffle sharding of requests into queues. When enqueuing a request
	// at this priority level the request's flow identifier (a string
	// pair) is hashed and the hash value is used to shuffle the list
	// of queues and deal a hand of the size specified here. The
	// request is put into one of the shortest queues in that hand.
	// `handSize` must be no larger than `queues`, and should be
	// significantly smaller (so that a few heavy flows do not
	// saturate most of the queues). See the user-facing
	// documentation for more extensive guidance on setting this
	// field. This field has a default value of 8.
	// +optional
	HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"`

	// `queueLengthLimit` is the maximum number of requests allowed to
	// be waiting in a given queue of this priority level at a time;
	// excess requests are rejected. This value must be positive. If
	// not specified, it will be defaulted to 50.
	// +optional
	QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"`
}

// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type
type PriorityLevelConfigurationConditionType string

// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatus struct {
	// `conditions` is the current state of "request-priority".
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
}

// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationCondition struct {
	// `type` is the type of the condition.
	// Required.
	Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
	// `status` is the status of the condition.
	// Can be True, False, Unknown.
	// Required.
	Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
	// `lastTransitionTime` is the last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// `message` is a human-readable message indicating details about last transition.
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}

// ConditionStatus is the status of the condition.
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
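To show how the Go types above fit together (the union-style Subject, the rules slice, and the priority level reference), here is an illustrative sketch; it is not part of the vendored code, and the object name and the catch-all rule are invented for the example.

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A FlowSchema that routes every request from authenticated users to the
	// preset "exempt" priority level, distinguishing flows by username.
	fs := flowcontrolv1alpha1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-catch-all"},
		Spec: flowcontrolv1alpha1.FlowSchemaSpec{
			PriorityLevelConfiguration: flowcontrolv1alpha1.PriorityLevelConfigurationReference{
				Name: flowcontrolv1alpha1.PriorityLevelConfigurationNameExempt,
			},
			MatchingPrecedence: 1000,
			DistinguisherMethod: &flowcontrolv1alpha1.FlowDistinguisherMethod{
				Type: flowcontrolv1alpha1.FlowDistinguisherMethodByUserType,
			},
			Rules: []flowcontrolv1alpha1.PolicyRulesWithSubjects{{
				Subjects: []flowcontrolv1alpha1.Subject{{
					Kind:  flowcontrolv1alpha1.SubjectKindGroup,
					Group: &flowcontrolv1alpha1.GroupSubject{Name: "system:authenticated"},
				}},
				NonResourceRules: []flowcontrolv1alpha1.NonResourcePolicyRule{{
					Verbs:           []string{flowcontrolv1alpha1.VerbAll},
					NonResourceURLs: []string{flowcontrolv1alpha1.NonResourceAll},
				}},
			}},
		},
	}
	fmt.Println(fs.Name, fs.Spec.PriorityLevelConfiguration.Name)
}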
258
vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
generated
vendored
Normal file
258
vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
generated
vendored
Normal file
@ -0,0 +1,258 @@
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
// This file contains a collection of methods that can be used from go-restful to
|
||||
// generate Swagger API documentation for its models. Please read this PR for more
|
||||
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
|
||||
//
|
||||
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
|
||||
// they are on one line! For multiple line or blocks that you want to ignore use ---.
|
||||
// Any context after a --- is ignored.
|
||||
//
|
||||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_FlowDistinguisherMethod = map[string]string{
|
||||
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
|
||||
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
|
||||
}
|
||||
|
||||
func (FlowDistinguisherMethod) SwaggerDoc() map[string]string {
|
||||
return map_FlowDistinguisherMethod
|
||||
}
|
||||
|
||||
var map_FlowSchema = map[string]string{
|
||||
"": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".",
|
||||
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
"status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (FlowSchema) SwaggerDoc() map[string]string {
|
||||
return map_FlowSchema
|
||||
}
|
||||
|
||||
var map_FlowSchemaCondition = map[string]string{
|
||||
"": "FlowSchemaCondition describes conditions for a FlowSchema.",
|
||||
"type": "`type` is the type of the condition. Required.",
|
||||
"status": "`status` is the status of the condition. Can be True, False, Unknown. Required.",
|
||||
"lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.",
|
||||
"reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.",
|
||||
"message": "`message` is a human-readable message indicating details about last transition.",
|
||||
}
|
||||
|
||||
func (FlowSchemaCondition) SwaggerDoc() map[string]string {
|
||||
return map_FlowSchemaCondition
|
||||
}
|
||||
|
||||
var map_FlowSchemaList = map[string]string{
|
||||
"": "FlowSchemaList is a list of FlowSchema objects.",
|
||||
"metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"items": "`items` is a list of FlowSchemas.",
|
||||
}
|
||||
|
||||
func (FlowSchemaList) SwaggerDoc() map[string]string {
|
||||
return map_FlowSchemaList
|
||||
}
|
||||
|
||||
var map_FlowSchemaSpec = map[string]string{
|
||||
"": "FlowSchemaSpec describes how the FlowSchema's specification looks like.",
|
||||
"priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.",
|
||||
"matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be non-negative. Note that if the precedence is not specified or zero, it will be set to 1000 as default.",
|
||||
"distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.",
|
||||
"rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.",
|
||||
}
|
||||
|
||||
func (FlowSchemaSpec) SwaggerDoc() map[string]string {
|
||||
return map_FlowSchemaSpec
|
||||
}
|
||||
|
||||
var map_FlowSchemaStatus = map[string]string{
|
||||
"": "FlowSchemaStatus represents the current state of a FlowSchema.",
|
||||
"conditions": "`conditions` is a list of the current states of FlowSchema.",
|
||||
}
|
||||
|
||||
func (FlowSchemaStatus) SwaggerDoc() map[string]string {
|
||||
return map_FlowSchemaStatus
|
||||
}
|
||||
|
||||
var map_GroupSubject = map[string]string{
|
||||
"": "GroupSubject holds detailed information for group-kind subject.",
|
||||
"name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.",
|
||||
}
|
||||
|
||||
func (GroupSubject) SwaggerDoc() map[string]string {
|
||||
return map_GroupSubject
|
||||
}
|
||||
|
||||
var map_LimitResponse = map[string]string{
|
||||
"": "LimitResponse defines how to handle requests that can not be executed right now.",
|
||||
"type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.",
|
||||
"queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.",
|
||||
}
|
||||
|
||||
func (LimitResponse) SwaggerDoc() map[string]string {
|
||||
return map_LimitResponse
|
||||
}
|
||||
|
||||
var map_LimitedPriorityLevelConfiguration = map[string]string{
|
||||
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
|
||||
}
|
||||
|
||||
func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_LimitedPriorityLevelConfiguration
|
||||
}
|
||||
|
||||
var map_NonResourcePolicyRule = map[string]string{
|
||||
"": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.",
|
||||
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.",
|
||||
"nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.",
|
||||
}
|
||||
|
||||
func (NonResourcePolicyRule) SwaggerDoc() map[string]string {
|
||||
return map_NonResourcePolicyRule
|
||||
}
|
||||
|
||||
var map_PolicyRulesWithSubjects = map[string]string{
|
||||
"": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.",
|
||||
"subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.",
|
||||
"resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.",
|
||||
"nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.",
|
||||
}
|
||||
|
||||
func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string {
|
||||
return map_PolicyRulesWithSubjects
|
||||
}
|
||||
|
||||
var map_PriorityLevelConfiguration = map[string]string{
|
||||
"": "PriorityLevelConfiguration represents the configuration of a priority level.",
|
||||
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
"status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfiguration
|
||||
}
|
||||
|
||||
var map_PriorityLevelConfigurationCondition = map[string]string{
|
||||
"": "PriorityLevelConfigurationCondition defines the condition of priority level.",
|
||||
"type": "`type` is the type of the condition. Required.",
|
||||
"status": "`status` is the status of the condition. Can be True, False, Unknown. Required.",
|
||||
"lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.",
|
||||
"reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.",
|
||||
"message": "`message` is a human-readable message indicating details about last transition.",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfigurationCondition
|
||||
}
|
||||
|
||||
var map_PriorityLevelConfigurationList = map[string]string{
|
||||
"": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.",
|
||||
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"items": "`items` is a list of request-priorities.",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfigurationList
|
||||
}
|
||||
|
||||
var map_PriorityLevelConfigurationReference = map[string]string{
|
||||
"": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.",
|
||||
"name": "`name` is the name of the priority level configuration being referenced Required.",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfigurationReference
|
||||
}
|
||||
|
||||
var map_PriorityLevelConfigurationSpec = map[string]string{
|
||||
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
|
||||
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
|
||||
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfigurationSpec
|
||||
}
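// Editor's note: illustrative sketch only, not vendored code. It shows how the
// spec documented above might be populated for a "Limited" priority level that
// queues excess requests. The string literals, the LimitResponse.Type field,
// and the numeric values (the documented defaults) are assumptions chosen for
// illustration, not values taken from this commit.
var examplePriorityLevelSpec = PriorityLevelConfigurationSpec{
	Type: "Limited", // untyped constant, converts to the spec's string-typed enum
	Limited: &LimitedPriorityLevelConfiguration{
		LimitResponse: LimitResponse{
			Type: "Queue", // queue rather than reject requests over the limit (assumed value)
			Queuing: &QueuingConfiguration{
				Queues:           64, // documented default
				HandSize:         8,  // documented default
				QueueLengthLimit: 50, // documented default
			},
		},
	},
}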
|
||||
|
||||
var map_PriorityLevelConfigurationStatus = map[string]string{
|
||||
"": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".",
|
||||
"conditions": "`conditions` is the current state of \"request-priority\".",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string {
|
||||
return map_PriorityLevelConfigurationStatus
|
||||
}
|
||||
|
||||
var map_QueuingConfiguration = map[string]string{
|
||||
"": "QueuingConfiguration holds the configuration parameters for queuing",
|
||||
"queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.",
|
||||
"handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.",
|
||||
"queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.",
|
||||
}
|
||||
|
||||
func (QueuingConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_QueuingConfiguration
|
||||
}
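// Editor's note: standalone illustrative sketch of the shuffle-sharding idea
// described in map_QueuingConfiguration above — hash the flow identifier, deal
// a "hand" of handSize queues, and enqueue on the shortest queue in that hand.
// This is a simplification (the real apiserver dealer avoids duplicate cards
// and uses a dedicated hashing scheme); it exists only to make the prose
// concrete. Usage might look like pickQueue(lengths, flowHash, int(cfg.HandSize)).
func pickQueue(queueLengths []int, flowHash uint64, handSize int) int {
	numQueues := len(queueLengths)
	if numQueues == 0 {
		return -1
	}
	best := -1
	for i := 0; i < handSize; i++ {
		// Deal one card: derive a queue index from successive "digits" of the hash.
		idx := int(flowHash % uint64(numQueues))
		flowHash /= uint64(numQueues)
		if best == -1 || queueLengths[idx] < queueLengths[best] {
			best = idx
		}
	}
	return best
}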
|
||||
|
||||
var map_ResourcePolicyRule = map[string]string{
|
||||
"": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.",
|
||||
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.",
|
||||
"apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.",
|
||||
"resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.",
|
||||
"clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.",
|
||||
"namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.",
|
||||
}
|
||||
|
||||
func (ResourcePolicyRule) SwaggerDoc() map[string]string {
|
||||
return map_ResourcePolicyRule
|
||||
}
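// Editor's note: illustrative sketch only, not vendored code. It captures the
// namespace-related part of the ResourcePolicyRule contract documented above:
// "*" matches any *specified* namespace, while requests that specify no
// namespace are matched only via clusterScope. The helper name and parameter
// shape are assumptions for this example.
func resourceRuleMatchesNamespace(rule ResourcePolicyRule, namespace string) bool {
	if namespace == "" {
		// Request does not specify a namespace (cluster-scoped resource or
		// an all-namespaces request).
		return rule.ClusterScope
	}
	for _, ns := range rule.Namespaces {
		if ns == "*" || ns == namespace {
			return true
		}
	}
	return false
}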
|
||||
|
||||
var map_ServiceAccountSubject = map[string]string{
|
||||
"": "ServiceAccountSubject holds detailed information for service-account-kind subject.",
|
||||
"namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.",
|
||||
"name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.",
|
||||
}
|
||||
|
||||
func (ServiceAccountSubject) SwaggerDoc() map[string]string {
|
||||
return map_ServiceAccountSubject
|
||||
}
|
||||
|
||||
var map_Subject = map[string]string{
|
||||
"": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
|
||||
"kind": "Required",
|
||||
}
|
||||
|
||||
func (Subject) SwaggerDoc() map[string]string {
|
||||
return map_Subject
|
||||
}
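// Editor's note: illustrative sketch only, not vendored code. A Subject is a
// discriminated union: the kind field selects which of the user, group, or
// serviceAccount fields is populated. The literal kind string and the chosen
// namespace below are assumptions for illustration.
var exampleSubject = Subject{
	Kind: "ServiceAccount",
	ServiceAccount: &ServiceAccountSubject{
		Namespace: "kube-system",
		Name:      "*", // "*" matches any ServiceAccount name in that namespace
	},
}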
|
||||
|
||||
var map_UserSubject = map[string]string{
|
||||
"": "UserSubject holds detailed information for user-kind subject.",
|
||||
"name": "`name` is the username that matches, or \"*\" to match all usernames. Required.",
|
||||
}
|
||||
|
||||
func (UserSubject) SwaggerDoc() map[string]string {
|
||||
return map_UserSubject
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
|
@ -0,0 +1,541 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod.
|
||||
func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowDistinguisherMethod)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowSchema) DeepCopyInto(out *FlowSchema) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema.
|
||||
func (in *FlowSchema) DeepCopy() *FlowSchema {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowSchema)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FlowSchema) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
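// Editor's note: illustrative usage sketch, not vendored code. The generated
// DeepCopy/DeepCopyInto/DeepCopyObject methods exist so callers can mutate a
// private copy of an object (for example one read from a shared informer
// cache) without affecting the original; DeepCopyObject additionally allows
// the copy to be made through the generic runtime.Object interface. The helper
// name and label key below are assumptions for this example.
func cloneAndLabel(in *FlowSchema) *FlowSchema {
	out := in.DeepCopy()
	if out.Labels == nil {
		out.Labels = map[string]string{}
	}
	out.Labels["example.invalid/edited"] = "true" // the original's labels are untouched
	return out
}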
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) {
|
||||
*out = *in
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition.
|
||||
func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowSchemaCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]FlowSchema, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList.
|
||||
func (in *FlowSchemaList) DeepCopy() *FlowSchemaList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowSchemaList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FlowSchemaList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) {
|
||||
*out = *in
|
||||
out.PriorityLevelConfiguration = in.PriorityLevelConfiguration
|
||||
if in.DistinguisherMethod != nil {
|
||||
in, out := &in.DistinguisherMethod, &out.DistinguisherMethod
|
||||
*out = new(FlowDistinguisherMethod)
|
||||
**out = **in
|
||||
}
|
||||
if in.Rules != nil {
|
||||
in, out := &in.Rules, &out.Rules
|
||||
*out = make([]PolicyRulesWithSubjects, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec.
|
||||
func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowSchemaSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]FlowSchemaCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus.
|
||||
func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FlowSchemaStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GroupSubject) DeepCopyInto(out *GroupSubject) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject.
|
||||
func (in *GroupSubject) DeepCopy() *GroupSubject {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GroupSubject)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitResponse) DeepCopyInto(out *LimitResponse) {
|
||||
*out = *in
|
||||
if in.Queuing != nil {
|
||||
in, out := &in.Queuing, &out.Queuing
|
||||
*out = new(QueuingConfiguration)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse.
|
||||
func (in *LimitResponse) DeepCopy() *LimitResponse {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LimitResponse)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) {
|
||||
*out = *in
|
||||
in.LimitResponse.DeepCopyInto(&out.LimitResponse)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration.
|
||||
func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LimitedPriorityLevelConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) {
|
||||
*out = *in
|
||||
if in.Verbs != nil {
|
||||
in, out := &in.Verbs, &out.Verbs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NonResourceURLs != nil {
|
||||
in, out := &in.NonResourceURLs, &out.NonResourceURLs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule.
|
||||
func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NonResourcePolicyRule)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) {
|
||||
*out = *in
|
||||
if in.Subjects != nil {
|
||||
in, out := &in.Subjects, &out.Subjects
|
||||
*out = make([]Subject, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ResourceRules != nil {
|
||||
in, out := &in.ResourceRules, &out.ResourceRules
|
||||
*out = make([]ResourcePolicyRule, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.NonResourceRules != nil {
|
||||
in, out := &in.NonResourceRules, &out.NonResourceRules
|
||||
*out = make([]NonResourcePolicyRule, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects.
|
||||
func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PolicyRulesWithSubjects)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration.
|
||||
func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) {
|
||||
*out = *in
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition.
|
||||
func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfigurationCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]PriorityLevelConfiguration, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList.
|
||||
func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfigurationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference.
|
||||
func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfigurationReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) {
|
||||
*out = *in
|
||||
if in.Limited != nil {
|
||||
in, out := &in.Limited, &out.Limited
|
||||
*out = new(LimitedPriorityLevelConfiguration)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec.
|
||||
func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfigurationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]PriorityLevelConfigurationCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus.
|
||||
func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityLevelConfigurationStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration.
|
||||
func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(QueuingConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) {
|
||||
*out = *in
|
||||
if in.Verbs != nil {
|
||||
in, out := &in.Verbs, &out.Verbs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.APIGroups != nil {
|
||||
in, out := &in.APIGroups, &out.APIGroups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule.
|
||||
func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourcePolicyRule)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject.
|
||||
func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceAccountSubject)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Subject) DeepCopyInto(out *Subject) {
|
||||
*out = *in
|
||||
if in.User != nil {
|
||||
in, out := &in.User, &out.User
|
||||
*out = new(UserSubject)
|
||||
**out = **in
|
||||
}
|
||||
if in.Group != nil {
|
||||
in, out := &in.Group, &out.Group
|
||||
*out = new(GroupSubject)
|
||||
**out = **in
|
||||
}
|
||||
if in.ServiceAccount != nil {
|
||||
in, out := &in.ServiceAccount, &out.ServiceAccount
|
||||
*out = new(ServiceAccountSubject)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
|
||||
func (in *Subject) DeepCopy() *Subject {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Subject)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UserSubject) DeepCopyInto(out *UserSubject) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject.
|
||||
func (in *UserSubject) DeepCopy() *UserSubject {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UserSubject)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
@ -151,7 +151,7 @@ message PodDisruptionBudgetSpec {
|
|||
// PodDisruptionBudget. Status may trail the actual state of a system.
|
||||
message PodDisruptionBudgetStatus {
|
||||
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
|
||||
// status informatio is valid only if observedGeneration equals to PDB's object generation.
|
||||
// status information is valid only if observedGeneration equals to PDB's object generation.
|
||||
// +optional
|
||||
optional int64 observedGeneration = 1;
|
||||
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ type PodDisruptionBudgetSpec struct {
|
|||
// PodDisruptionBudget. Status may trail the actual state of a system.
|
||||
type PodDisruptionBudgetStatus struct {
|
||||
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
|
||||
// status informatio is valid only if observedGeneration equals to PDB's object generation.
|
||||
// status information is valid only if observedGeneration equals to PDB's object generation.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
|
||||
|
||||
|
|
@ -276,7 +276,7 @@ var AllowAllCapabilities v1.Capability = "*"
|
|||
// FSType gives strong typing to different file systems that are used by volumes.
|
||||
type FSType string
|
||||
|
||||
var (
|
||||
const (
|
||||
AzureFile FSType = "azureFile"
|
||||
Flocker FSType = "flocker"
|
||||
FlexVolume FSType = "flexVolume"
|
||||
|
|
|
|||
|
|
@ -126,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_PodDisruptionBudgetStatus = map[string]string{
|
||||
"": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
|
||||
"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.",
|
||||
"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.",
|
||||
"disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
|
||||
"disruptionsAllowed": "Number of pod disruptions that are currently allowed.",
|
||||
"currentHealthy": "current number of healthy pods",
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ message AggregationRule {
|
|||
}
|
||||
|
||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
|
||||
message ClusterRole {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -55,6 +56,7 @@ message ClusterRole {
|
|||
|
||||
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
||||
// and adds who information via Subject.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
|
||||
message ClusterRoleBinding {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -69,7 +71,8 @@ message ClusterRoleBinding {
|
|||
optional RoleRef roleRef = 3;
|
||||
}
|
||||
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.
|
||||
message ClusterRoleBindingList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -79,7 +82,8 @@ message ClusterRoleBindingList {
|
|||
repeated ClusterRoleBinding items = 2;
|
||||
}
|
||||
|
||||
// ClusterRoleList is a collection of ClusterRoles
|
||||
// ClusterRoleList is a collection of ClusterRoles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
|
||||
message ClusterRoleList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -117,6 +121,7 @@ message PolicyRule {
|
|||
}
|
||||
|
||||
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
|
||||
message Role {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -130,6 +135,7 @@ message Role {
|
|||
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
||||
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
||||
// namespace only have effect in that namespace.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
|
||||
message RoleBinding {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -145,6 +151,7 @@ message RoleBinding {
|
|||
}
|
||||
|
||||
// RoleBindingList is a collection of RoleBindings
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
|
||||
message RoleBindingList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -154,7 +161,8 @@ message RoleBindingList {
|
|||
repeated RoleBinding items = 2;
|
||||
}
|
||||
|
||||
// RoleList is a collection of Roles
|
||||
// RoleList is a collection of Roles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
|
||||
message RoleList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
|
|||
|
|
@ -103,6 +103,7 @@ type RoleRef struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
|
||||
type Role struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -120,6 +121,7 @@ type Role struct {
|
|||
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
||||
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
||||
// namespace only have effect in that namespace.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
|
||||
type RoleBinding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -138,6 +140,7 @@ type RoleBinding struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// RoleBindingList is a collection of RoleBindings
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
|
||||
type RoleBindingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -150,7 +153,8 @@ type RoleBindingList struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// RoleList is a collection of Roles
|
||||
// RoleList is a collection of Roles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
|
||||
type RoleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -166,6 +170,7 @@ type RoleList struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
|
||||
type ClusterRole struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -197,6 +202,7 @@ type AggregationRule struct {
|
|||
|
||||
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
||||
// and adds who information via Subject.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
|
||||
type ClusterRoleBinding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -214,7 +220,8 @@ type ClusterRoleBinding struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.
|
||||
type ClusterRoleBindingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -227,7 +234,8 @@ type ClusterRoleBindingList struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRoleList is a collection of ClusterRoles
|
||||
// ClusterRoleList is a collection of ClusterRoles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
|
||||
type ClusterRoleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRole = map[string]string{
|
||||
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
|
||||
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"rules": "Rules holds all the PolicyRules for this ClusterRole",
|
||||
"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
|
||||
|
|
@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRoleBinding = map[string]string{
|
||||
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
|
||||
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"subjects": "Subjects holds references to the objects the role applies to.",
|
||||
"roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
|
||||
|
|
@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRoleBindingList = map[string]string{
|
||||
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings",
|
||||
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"items": "Items is a list of ClusterRoleBindings",
|
||||
}
|
||||
|
|
@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRoleList = map[string]string{
|
||||
"": "ClusterRoleList is a collection of ClusterRoles",
|
||||
"": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"items": "Items is a list of ClusterRoles",
|
||||
}
|
||||
|
|
@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_Role = map[string]string{
|
||||
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
|
||||
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"rules": "Rules holds all the PolicyRules for this Role",
|
||||
}
|
||||
|
|
@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_RoleBinding = map[string]string{
|
||||
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
|
||||
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"subjects": "Subjects holds references to the objects the role applies to.",
|
||||
"roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
|
||||
|
|
@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_RoleBindingList = map[string]string{
|
||||
"": "RoleBindingList is a collection of RoleBindings",
|
||||
"": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"items": "Items is a list of RoleBindings",
|
||||
}
|
||||
|
|
@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_RoleList = map[string]string{
|
||||
"": "RoleList is a collection of Roles",
|
||||
"": "RoleList is a collection of Roles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"items": "Items is a list of Roles",
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ message AggregationRule {
|
|||
}
|
||||
|
||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
|
||||
message ClusterRole {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -55,6 +56,7 @@ message ClusterRole {
|
|||
|
||||
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
||||
// and adds who information via Subject.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
|
||||
message ClusterRoleBinding {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -69,7 +71,8 @@ message ClusterRoleBinding {
|
|||
optional RoleRef roleRef = 3;
|
||||
}
|
||||
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.
|
||||
message ClusterRoleBindingList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -79,7 +82,8 @@ message ClusterRoleBindingList {
|
|||
repeated ClusterRoleBinding items = 2;
|
||||
}
|
||||
|
||||
// ClusterRoleList is a collection of ClusterRoles
|
||||
// ClusterRoleList is a collection of ClusterRoles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
|
||||
message ClusterRoleList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -117,6 +121,7 @@ message PolicyRule {
|
|||
}
|
||||
|
||||
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
|
||||
message Role {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -130,6 +135,7 @@ message Role {
|
|||
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
||||
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
||||
// namespace only have effect in that namespace.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
|
||||
message RoleBinding {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -145,6 +151,7 @@ message RoleBinding {
|
|||
}
|
||||
|
||||
// RoleBindingList is a collection of RoleBindings
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
|
||||
message RoleBindingList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
@ -155,6 +162,7 @@ message RoleBindingList {
|
|||
}
|
||||
|
||||
// RoleList is a collection of Roles
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
|
||||
message RoleList {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
|
|
|
|||
|
|
@ -102,6 +102,7 @@ type RoleRef struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
|
||||
type Role struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -119,6 +120,7 @@ type Role struct {
|
|||
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
|
||||
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
|
||||
// namespace only have effect in that namespace.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
|
||||
type RoleBinding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -137,6 +139,7 @@ type RoleBinding struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// RoleBindingList is a collection of RoleBindings
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
|
||||
type RoleBindingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -150,6 +153,7 @@ type RoleBindingList struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// RoleList is a collection of Roles
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
|
||||
type RoleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -165,6 +169,7 @@ type RoleList struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
|
||||
type ClusterRole struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -195,6 +200,7 @@ type AggregationRule struct {
|
|||
|
||||
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
|
||||
// and adds who information via Subject.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
|
||||
type ClusterRoleBinding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -212,7 +218,8 @@ type ClusterRoleBinding struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings
|
||||
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.
|
||||
type ClusterRoleBindingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
@ -225,7 +232,8 @@ type ClusterRoleBindingList struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterRoleList is a collection of ClusterRoles
|
||||
// ClusterRoleList is a collection of ClusterRoles.
|
||||
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
|
||||
type ClusterRoleList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRole = map[string]string{
|
||||
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
|
||||
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"rules": "Rules holds all the PolicyRules for this ClusterRole",
|
||||
"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
|
||||
|
|
@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ClusterRoleBinding = map[string]string{
|
||||
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
|
||||
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"subjects": "Subjects holds references to the objects the role applies to.",
|
||||
"roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
@@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string {
}

var map_ClusterRoleBindingList = map[string]string{
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings",
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoleBindings",
}

@@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
}

var map_ClusterRoleList = map[string]string{
"": "ClusterRoleList is a collection of ClusterRoles",
"": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoles",
}

@@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string {
}

var map_Role = map[string]string{
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"rules": "Rules holds all the PolicyRules for this Role",
}

@@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string {
}

var map_RoleBinding = map[string]string{
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",

@@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string {
}

var map_RoleBindingList = map[string]string{
"": "RoleBindingList is a collection of RoleBindings",
"": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of RoleBindings",
}

@@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string {
}

var map_RoleList = map[string]string{
"": "RoleList is a collection of Roles",
"": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of Roles",
}
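
For reference, the replacement group version named in these deprecation notes is rbac.authorization.k8s.io/v1. A minimal sketch of what a caller-side object against that group version looks like, assuming the standard k8s.io/api/rbac/v1 types (the binding, subject, and role names are illustrative):

package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newViewBinding builds a ClusterRoleBinding against rbac.authorization.k8s.io/v1,
// the group version the deprecated alpha/beta docs above point to.
func newViewBinding() *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "read-only"},
		Subjects: []rbacv1.Subject{
			{Kind: rbacv1.ServiceAccountKind, Name: "reader", Namespace: "default"},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "view",
		},
	}
}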
@@ -29,6 +29,80 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";

// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will
// automatically populate the CSINode object for the CSI driver as part of
// kubelet plugin registration.
// CSINode has the same name as a node. If the object is missing, it means either
// there are no CSI Drivers available on the node, or the Kubelet version is low
// enough that it doesn't create this object.
// CSINode has an OwnerReference that points to the corresponding node object.
message CSINode {
// metadata.name must be the Kubernetes node name.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// spec is the specification of CSINode
optional CSINodeSpec spec = 2;
}

// CSINodeDriver holds information about the specification of one CSI driver installed on a node
message CSINodeDriver {
// This is the name of the CSI driver that this object refers to.
// This MUST be the same name returned by the CSI GetPluginName() call for
// that driver.
optional string name = 1;

// nodeID of the node from the driver point of view.
// This field enables Kubernetes to communicate with storage systems that do
// not share the same nomenclature for nodes. For example, Kubernetes may
// refer to a given node as "node1", but the storage system may refer to
// the same node as "nodeA". When Kubernetes issues a command to the storage
// system to attach a volume to a specific node, it can use this field to
// refer to the node name using the ID that the storage system will
// understand, e.g. "nodeA" instead of "node1". This field is required.
optional string nodeID = 2;

// topologyKeys is the list of keys supported by the driver.
// When a driver is initialized on a cluster, it provides a set of topology
// keys that it understands (e.g. "company.com/zone", "company.com/region").
// When a driver is initialized on a node, it provides the same topology keys
// along with values. Kubelet will expose these topology keys as labels
// on its own node object.
// When Kubernetes does topology aware provisioning, it can use this list to
// determine which labels it should retrieve from the node object and pass
// back to the driver.
// It is possible for different nodes to use different topology keys.
// This can be empty if driver does not support topology.
// +optional
repeated string topologyKeys = 3;

// allocatable represents the volume resources of a node that are available for scheduling.
// This field is beta.
// +optional
optional VolumeNodeResources allocatable = 4;
}

// CSINodeList is a collection of CSINode objects.
message CSINodeList {
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// items is the list of CSINode
repeated CSINode items = 2;
}

// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
message CSINodeSpec {
// drivers is a list of information of all CSI Drivers existing on a node.
// If all drivers in the list are uninstalled, this can become empty.
// +patchMergeKey=name
// +patchStrategy=merge
repeated CSINodeDriver drivers = 1;
}

// StorageClass describes the parameters for a class of storage for
// which PersistentVolumes can be dynamically provisioned.
//

@@ -193,3 +267,13 @@ message VolumeError {
optional string message = 2;
}

// VolumeNodeResources is a set of resource limits for scheduling of volumes.
message VolumeNodeResources {
// Maximum number of unique volumes managed by the CSI driver that can be used on a node.
// A volume that is both attached and mounted on a node is considered to be used once, not twice.
// The same rule applies for a unique volume that is shared among multiple pods on the same node.
// If this field is not specified, then the supported number of volumes on this node is unbounded.
// +optional
optional int32 count = 1;
}

@@ -49,6 +49,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&VolumeAttachment{},
&VolumeAttachmentList{},

&CSINode{},
&CSINodeList{},
)

metav1.AddToGroupVersion(scheme, SchemeGroupVersion)

@@ -17,7 +17,7 @@ limitations under the License.
package v1

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -216,3 +216,97 @@ type VolumeError struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will
// automatically populate the CSINode object for the CSI driver as part of
// kubelet plugin registration.
// CSINode has the same name as a node. If the object is missing, it means either
// there are no CSI Drivers available on the node, or the Kubelet version is low
// enough that it doesn't create this object.
// CSINode has an OwnerReference that points to the corresponding node object.
type CSINode struct {
metav1.TypeMeta `json:",inline"`

// metadata.name must be the Kubernetes node name.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// spec is the specification of CSINode
Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}

// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
type CSINodeSpec struct {
// drivers is a list of information of all CSI Drivers existing on a node.
// If all drivers in the list are uninstalled, this can become empty.
// +patchMergeKey=name
// +patchStrategy=merge
Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"`
}

// CSINodeDriver holds information about the specification of one CSI driver installed on a node
type CSINodeDriver struct {
// This is the name of the CSI driver that this object refers to.
// This MUST be the same name returned by the CSI GetPluginName() call for
// that driver.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

// nodeID of the node from the driver point of view.
// This field enables Kubernetes to communicate with storage systems that do
// not share the same nomenclature for nodes. For example, Kubernetes may
// refer to a given node as "node1", but the storage system may refer to
// the same node as "nodeA". When Kubernetes issues a command to the storage
// system to attach a volume to a specific node, it can use this field to
// refer to the node name using the ID that the storage system will
// understand, e.g. "nodeA" instead of "node1". This field is required.
NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"`

// topologyKeys is the list of keys supported by the driver.
// When a driver is initialized on a cluster, it provides a set of topology
// keys that it understands (e.g. "company.com/zone", "company.com/region").
// When a driver is initialized on a node, it provides the same topology keys
// along with values. Kubelet will expose these topology keys as labels
// on its own node object.
// When Kubernetes does topology aware provisioning, it can use this list to
// determine which labels it should retrieve from the node object and pass
// back to the driver.
// It is possible for different nodes to use different topology keys.
// This can be empty if driver does not support topology.
// +optional
TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"`

// allocatable represents the volume resources of a node that are available for scheduling.
// This field is beta.
// +optional
Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"`
}

// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResources struct {
// Maximum number of unique volumes managed by the CSI driver that can be used on a node.
// A volume that is both attached and mounted on a node is considered to be used once, not twice.
// The same rule applies for a unique volume that is shared among multiple pods on the same node.
// If this field is not specified, then the supported number of volumes on this node is unbounded.
// +optional
Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// CSINodeList is a collection of CSINode objects.
type CSINodeList struct {
metav1.TypeMeta `json:",inline"`

// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// items is the list of CSINode
Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"`
}
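
A small sketch of how a consumer might read the new allocatable limit from these types; it assumes a CSINode obtained from the storage.k8s.io/v1 API, and the helper name and driver handling below are illustrative:

package example

import (
	storagev1 "k8s.io/api/storage/v1"
)

// maxVolumesFor returns the scheduler-relevant volume limit reported by the
// named driver on this CSINode, or -1 when the limit is left unbounded or
// the driver is not registered on the node.
func maxVolumesFor(node *storagev1.CSINode, driverName string) int32 {
	for _, d := range node.Spec.Drivers {
		if d.Name != driverName {
			continue
		}
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			return *d.Allocatable.Count
		}
	}
	return -1
}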

@@ -27,6 +27,47 @@ package v1
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_CSINode = map[string]string{
"": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.",
"metadata": "metadata.name must be the Kubernetes node name.",
"spec": "spec is the specification of CSINode",
}

func (CSINode) SwaggerDoc() map[string]string {
return map_CSINode
}

var map_CSINodeDriver = map[string]string{
"": "CSINodeDriver holds information about the specification of one CSI driver installed on a node",
"name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.",
"nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.",
"topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.",
"allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.",
}

func (CSINodeDriver) SwaggerDoc() map[string]string {
return map_CSINodeDriver
}

var map_CSINodeList = map[string]string{
"": "CSINodeList is a collection of CSINode objects.",
"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "items is the list of CSINode",
}

func (CSINodeList) SwaggerDoc() map[string]string {
return map_CSINodeList
}

var map_CSINodeSpec = map[string]string{
"": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node",
"drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.",
}

func (CSINodeSpec) SwaggerDoc() map[string]string {
return map_CSINodeSpec
}

var map_StorageClass = map[string]string{
"": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",

@@ -116,4 +157,13 @@ func (VolumeError) SwaggerDoc() map[string]string {
return map_VolumeError
}

var map_VolumeNodeResources = map[string]string{
"": "VolumeNodeResources is a set of resource limits for scheduling of volumes.",
"count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.",
}

func (VolumeNodeResources) SwaggerDoc() map[string]string {
return map_VolumeNodeResources
}

// AUTO-GENERATED FUNCTIONS END HERE

@@ -25,6 +25,115 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINode) DeepCopyInto(out *CSINode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode.
func (in *CSINode) DeepCopy() *CSINode {
if in == nil {
return nil
}
out := new(CSINode)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) {
*out = *in
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Allocatable != nil {
in, out := &in.Allocatable, &out.Allocatable
*out = new(VolumeNodeResources)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver.
func (in *CSINodeDriver) DeepCopy() *CSINodeDriver {
if in == nil {
return nil
}
out := new(CSINodeDriver)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeList) DeepCopyInto(out *CSINodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSINode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList.
func (in *CSINodeList) DeepCopy() *CSINodeList {
if in == nil {
return nil
}
out := new(CSINodeList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) {
*out = *in
if in.Drivers != nil {
in, out := &in.Drivers, &out.Drivers
*out = make([]CSINodeDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec.
func (in *CSINodeSpec) DeepCopy() *CSINodeSpec {
if in == nil {
return nil
}
out := new(CSINodeSpec)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClass) DeepCopyInto(out *StorageClass) {
*out = *in

@@ -271,3 +380,24 @@ func (in *VolumeError) DeepCopy() *VolumeError {
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) {
*out = *in
if in.Count != nil {
in, out := &in.Count, &out.Count
*out = new(int32)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources.
func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources {
if in == nil {
return nil
}
out := new(VolumeNodeResources)
in.DeepCopyInto(out)
return out
}
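
The generated helpers follow the usual copy-on-write pattern; a brief, illustrative sketch of using DeepCopy before mutating a shared object (the helper name is hypothetical):

package example

import (
	storagev1 "k8s.io/api/storage/v1"
)

// withTopologyKey returns a modified copy of the given CSINode without
// touching the original, relying on the generated DeepCopy above.
func withTopologyKey(in *storagev1.CSINode, key string) *storagev1.CSINode {
	out := in.DeepCopy()
	if len(out.Spec.Drivers) > 0 {
		out.Spec.Drivers[0].TopologyKeys = append(out.Spec.Drivers[0].TopologyKeys, key)
	}
	return out
}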

@@ -121,6 +121,8 @@ message CSIDriverSpec {
repeated string volumeLifecycleModes = 3;
}

// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
// See the release notes for more information.
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will

@@ -348,6 +348,8 @@ const (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
// See the release notes for more information.
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will

@@ -59,7 +59,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string {
}

var map_CSINode = map[string]string{
"": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.",
"": "DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.",
"metadata": "metadata.name must be the Kubernetes node name.",
"spec": "spec is the specification of CSINode",
}

@@ -70,6 +70,28 @@ func (e *StatusError) DebugError() (string, []interface{}) {
return "server response object: %#v", []interface{}{e.ErrStatus}
}

// HasStatusCause returns true if the provided error has a details cause
// with the provided type name.
func HasStatusCause(err error, name metav1.CauseType) bool {
_, ok := StatusCause(err, name)
return ok
}

// StatusCause returns the named cause from the provided error if it exists and
// the error is of the type APIStatus. Otherwise it returns false.
func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) {
apierr, ok := err.(APIStatus)
if !ok || apierr == nil || apierr.Status().Details == nil {
return metav1.StatusCause{}, false
}
for _, cause := range apierr.Status().Details.Causes {
if cause.Type == name {
return cause, true
}
}
return metav1.StatusCause{}, false
}

// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
type UnexpectedObjectError struct {
Object runtime.Object

@@ -201,6 +223,7 @@ func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError
}

// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known.
// DEPRECATED: Please use NewResourceExpired instead.
func NewGone(message string) *StatusError {
return &StatusError{metav1.Status{
Status: metav1.StatusFailure,
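
A minimal sketch of how the new HasStatusCause helper might be used by a caller; the cause type checked here is just an illustrative choice:

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isUnexpectedResponse reports whether err carries an UnexpectedServerResponse
// cause, using the HasStatusCause helper added above.
func isUnexpectedResponse(err error) bool {
	return apierrors.HasStatusCause(err, metav1.CauseTypeUnexpectedServerResponse)
}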

@@ -21,15 +21,11 @@ import (
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
)

// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"

// Scheme is the registry for any type that adheres to the meta API spec.
var scheme = runtime.NewScheme()

var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.

@@ -38,22 +34,16 @@ var (
AddToScheme = localSchemeBuilder.AddToScheme
)

// Codecs provides access to encoding and decoding for the scheme.
var Codecs = serializer.NewCodecFactory(scheme)

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// ParameterCodec handles versioning of objects that are converted to query parameters.
var ParameterCodec = runtime.NewParameterCodec(scheme)

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// addToGroupVersion registers common meta types into schemas.
func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error {
func addToGroupVersion(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}

@@ -104,7 +94,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
// Unlike other API groups, meta internal knows about all meta external versions, but keeps
// the logic for conversion private.
func init() {
if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil {
panic(err)
}
localSchemeBuilder.Register(addToGroupVersion)
localSchemeBuilder.Register(metav1.RegisterConversions)
}

@@ -22,7 +22,7 @@ import (

// IsControlledBy checks if the object has a controllerRef set to the given owner
func IsControlledBy(obj Object, owner Object) bool {
ref := GetControllerOf(obj)
ref := GetControllerOfNoCopy(obj)
if ref == nil {
return false
}

@@ -31,9 +31,20 @@ func IsControlledBy(obj Object, owner Object) bool {

// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller
func GetControllerOf(controllee Object) *OwnerReference {
for _, ref := range controllee.GetOwnerReferences() {
if ref.Controller != nil && *ref.Controller {
return &ref
ref := GetControllerOfNoCopy(controllee)
if ref == nil {
return nil
}
cp := *ref
return &cp
}

// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
refs := controllee.GetOwnerReferences()
for i := range refs {
if refs[i].Controller != nil && *refs[i].Controller {
return &refs[i]
}
}
return nil
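
A short sketch contrasting the two accessors: GetControllerOf returns a copy that is safe to modify, while the new GetControllerOfNoCopy avoids the allocation but must be treated as read-only (the helper below is illustrative):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// controllerUID returns the UID of the managing controller, if any.
// GetControllerOfNoCopy is used because the reference is only read here;
// callers that mutate or retain the OwnerReference should prefer
// GetControllerOf, which hands back a copy.
func controllerUID(obj metav1.Object) (string, bool) {
	ref := metav1.GetControllerOfNoCopy(obj)
	if ref == nil {
		return "", false
	}
	return string(ref.UID), true
}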

@@ -18,6 +18,7 @@ package v1

import (
"fmt"
"net/url"
"strconv"
"strings"

@@ -26,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)

@@ -35,12 +37,17 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {

Convert_v1_ListMeta_To_v1_ListMeta,

Convert_v1_DeleteOptions_To_v1_DeleteOptions,

Convert_intstr_IntOrString_To_intstr_IntOrString,
Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString,
Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString,

Convert_Pointer_v1_Duration_To_v1_Duration,
Convert_v1_Duration_To_Pointer_v1_Duration,

Convert_Slice_string_To_v1_Time,
Convert_Slice_string_To_Pointer_v1_Time,

Convert_v1_Time_To_v1_Time,
Convert_v1_MicroTime_To_v1_MicroTime,

@@ -76,7 +83,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {

Convert_Slice_string_To_Slice_int32,

Convert_Slice_string_To_v1_DeletionPropagation,
Convert_Slice_string_To_Pointer_v1_DeletionPropagation,

Convert_Slice_string_To_v1_IncludeObjectPolicy,
)

@@ -194,12 +201,33 @@ func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) e
return nil
}

// +k8s:conversion-fn=copy-only
func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error {
*out = *in
return nil
}

// +k8s:conversion-fn=copy-only
func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {
*out = *in
return nil
}

func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error {
if *in == nil {
*out = intstr.IntOrString{} // zero value
return nil
}
*out = **in // copy
return nil
}

func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error {
temp := *in // copy
*out = &temp
return nil
}

// +k8s:conversion-fn=copy-only
func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error {
// Cannot deep copy these, because time.Time has unexported fields.

@@ -230,14 +258,30 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s
}

// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value
func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error {
func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error {
str := ""
if len(*input) > 0 {
str = (*input)[0]
if len(*in) > 0 {
str = (*in)[0]
}
return out.UnmarshalQueryParameter(str)
}

func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error {
if in == nil {
return nil
}
str := ""
if len(*in) > 0 {
str = (*in)[0]
}
temp := Time{}
if err := temp.UnmarshalQueryParameter(str); err != nil {
return err
}
*out = &temp
return nil
}

func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {
selector, err := labels.Parse(*in)
if err != nil {

@@ -310,20 +354,53 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio
return nil
}

// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error {
if len(*input) > 0 {
*out = DeletionPropagation((*input)[0])
// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error {
var str string
if len(*in) > 0 {
str = (*in)[0]
} else {
*out = ""
str = ""
}
temp := DeletionPropagation(str)
*out = &temp
return nil
}

// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value
func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
if len(*input) > 0 {
*out = IncludeObjectPolicy((*input)[0])
func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
if len(*in) > 0 {
*out = IncludeObjectPolicy((*in)[0])
}
return nil
}

// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions.
func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil {
return err
}

uid := types.UID("")
if values, ok := (*in)["uid"]; ok && len(values) > 0 {
uid = types.UID(values[0])
}

resourceVersion := ""
if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 {
resourceVersion = values[0]
}

if len(uid) > 0 || len(resourceVersion) > 0 {
if out.Preconditions == nil {
out.Preconditions = &Preconditions{}
}
if len(uid) > 0 {
out.Preconditions.UID = &uid
}
if len(resourceVersion) > 0 {
out.Preconditions.ResourceVersion = &resourceVersion
}
}
return nil
}
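
The query-parameter conversions above now populate pointer-typed fields such as DeleteOptions.PropagationPolicy. A minimal sketch of calling the new pointer conversion directly; passing a nil conversion.Scope is assumed to be acceptable here because the scope argument is not consulted:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// propagationFromQuery decodes a propagationPolicy query parameter the same
// way the generated url.Values conversions do, yielding a pointer value.
func propagationFromQuery(values []string) (*metav1.DeletionPropagation, error) {
	var out *metav1.DeletionPropagation
	if err := metav1.Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out, nil); err != nil {
		return nil, err
	}
	return out, nil
}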

@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:conversion-gen=false
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta

@@ -163,6 +163,7 @@ message DeleteOptions {

// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +k8s:conversion-gen=false
// +optional
optional Preconditions preconditions = 2;

@@ -416,9 +417,6 @@ message ListOptions {
// If this is not a watch, this field is ignored.
// If the feature gate WatchBookmarks is not enabled in apiserver,
// this field is ignored.
//
// This field is beta.
//
// +optional
optional bool allowWatchBookmarks = 9;

@@ -663,6 +661,15 @@ message ObjectMeta {
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// Finalizers may be processed and removed in any order. Order is NOT enforced
// because it introduces significant risk of stuck finalizers.
// finalizers is a shared field, any actor with permission can reorder it.
// If the finalizer list is processed in order, then this can lead to a situation
// in which the component responsible for the first finalizer in the list is
// waiting for a signal (field value, external system, or other) produced by a
// component responsible for a finalizer later in the list, resulting in a deadlock.
// Without enforced ordering finalizers are free to order amongst themselves and
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
repeated string finalizers = 14;

@@ -252,7 +252,9 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) {
meta.SetAnnotations(existingMeta.GetAnnotations())
meta.SetFinalizers(existingMeta.GetFinalizers())
meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
meta.SetManagedFields(existingMeta.GetManagedFields())
// managedFields must be preserved since it's been modified to
// track changed fields in the status update.
//meta.SetManagedFields(existingMeta.GetManagedFields())
}

// MarshalJSON implements json.Marshaler

@@ -25,6 +25,13 @@ import (
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"

var (
// localSchemeBuilder is used to make compiler happy for autogenerated
// conversions. However, it's not used.
schemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &schemeBuilder
)

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}

@@ -40,6 +47,31 @@ func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// scheme is the registry for the common types that adhere to the meta v1 API spec.
var scheme = runtime.NewScheme()

// ParameterCodec knows about query parameters used with the meta v1 API spec.
var ParameterCodec = runtime.NewParameterCodec(scheme)

func addEventConversionFuncs(scheme *runtime.Scheme) error {
return scheme.AddConversionFuncs(
Convert_v1_WatchEvent_To_watch_Event,
Convert_v1_InternalEvent_To_v1_WatchEvent,
Convert_watch_Event_To_v1_WatchEvent,
Convert_v1_WatchEvent_To_v1_InternalEvent,
)
}

var optionsTypes = []runtime.Object{
&ListOptions{},
&ExportOptions{},
&GetOptions{},
&DeleteOptions{},
&CreateOptions{},
&UpdateOptions{},
&PatchOptions{},
}

// AddToGroupVersion registers common meta types into schemas.
func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) {
scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{})

@@ -48,21 +80,7 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
&InternalEvent{},
)
// Supports legacy code paths, most callers should use metav1.ParameterCodec for now
scheme.AddKnownTypes(groupVersion,
&ListOptions{},
&ExportOptions{},
&GetOptions{},
&DeleteOptions{},
&CreateOptions{},
&UpdateOptions{},
&PatchOptions{},
)
utilruntime.Must(scheme.AddConversionFuncs(
Convert_v1_WatchEvent_To_watch_Event,
Convert_v1_InternalEvent_To_v1_WatchEvent,
Convert_watch_Event_To_v1_WatchEvent,
Convert_v1_WatchEvent_To_v1_InternalEvent,
))
scheme.AddKnownTypes(groupVersion, optionsTypes...)
// Register Unversioned types under their own special group
scheme.AddUnversionedTypes(Unversioned,
&Status{},

@@ -72,36 +90,14 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
&APIResourceList{},
)

utilruntime.Must(addEventConversionFuncs(scheme))

// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
utilruntime.Must(AddConversionFuncs(scheme))
utilruntime.Must(RegisterDefaults(scheme))
}

// scheme is the registry for the common types that adhere to the meta v1 API spec.
var scheme = runtime.NewScheme()

// ParameterCodec knows about query parameters used with the meta v1 API spec.
var ParameterCodec = runtime.NewParameterCodec(scheme)

func init() {
scheme.AddUnversionedTypes(SchemeGroupVersion,
&ListOptions{},
&ExportOptions{},
&GetOptions{},
&DeleteOptions{},
&CreateOptions{},
&UpdateOptions{},
&PatchOptions{},
)

if err := AddMetaToScheme(scheme); err != nil {
panic(err)
}

// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
utilruntime.Must(RegisterDefaults(scheme))
}

// AddMetaToScheme registers base meta types into schemas.
func AddMetaToScheme(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Table{},

@@ -114,3 +110,12 @@ func AddMetaToScheme(scheme *runtime.Scheme) error {
Convert_Slice_string_To_v1_IncludeObjectPolicy,
)
}

func init() {
scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...)

utilruntime.Must(AddMetaToScheme(scheme))

// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
utilruntime.Must(RegisterDefaults(scheme))
}
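
A brief sketch of the exported entry point in use: registering the shared meta v1 option types into a scheme for some other group version (the group name below is made up for illustration):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// newScheme builds a scheme for a hypothetical example.dev/v1 group and pulls
// in ListOptions, GetOptions, DeleteOptions, and friends via AddToGroupVersion.
func newScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
	metav1.AddToGroupVersion(s, gv)
	return s
}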

@@ -250,6 +250,15 @@ type ObjectMeta struct {
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// Finalizers may be processed and removed in any order. Order is NOT enforced
// because it introduces significant risk of stuck finalizers.
// finalizers is a shared field, any actor with permission can reorder it.
// If the finalizer list is processed in order, then this can lead to a situation
// in which the component responsible for the first finalizer in the list is
// waiting for a signal (field value, external system, or other) produced by a
// component responsible for a finalizer later in the list, resulting in a deadlock.
// Without enforced ordering finalizers are free to order amongst themselves and
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`

@@ -313,6 +322,7 @@ type OwnerReference struct {
BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ListOptions is the query options to a standard REST list call.

@@ -342,9 +352,6 @@ type ListOptions struct {
// If this is not a watch, this field is ignored.
// If the feature gate WatchBookmarks is not enabled in apiserver,
// this field is ignored.
//
// This field is beta.
//
// +optional
AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`

@@ -395,6 +402,7 @@ type ListOptions struct {
Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExportOptions is the query options to the standard REST get call.

@@ -409,6 +417,7 @@ type ExportOptions struct {
Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// GetOptions is the standard query options to the standard REST get call.

@@ -446,6 +455,7 @@ const (
DryRunAll = "All"
)

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DeleteOptions may be provided when deleting an API object.

@@ -461,6 +471,7 @@ type DeleteOptions struct {

// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +k8s:conversion-gen=false
// +optional
Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`

@@ -491,6 +502,7 @@ type DeleteOptions struct {
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// CreateOptions may be provided when creating an API object.

@@ -514,6 +526,7 @@ type CreateOptions struct {
FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PatchOptions may be provided when patching an API object.

@@ -546,6 +559,7 @@ type PatchOptions struct {
FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// UpdateOptions may be provided when updating an API object.

@@ -1258,6 +1272,7 @@ const (
)

// TableOptions are used when a Table is requested by the caller.
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TableOptions struct {
TypeMeta `json:",inline"`

@@ -194,7 +194,7 @@ var map_ListOptions = map[string]string{
"labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
"fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
"watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
"allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.",
"allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
"resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
"timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
"limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",

@@ -234,7 +234,7 @@ var map_ObjectMeta = map[string]string{
"labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
"annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
"ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
"finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
"finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
"clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
"managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
}
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// NestedFieldCopy returns a deep copy of the value of a nested field.
|
||||
|
|
@ -329,6 +330,8 @@ var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
|
|||
|
||||
type unstructuredJSONScheme struct{}
|
||||
|
||||
const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON"
|
||||
|
||||
func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
|
||||
var err error
|
||||
if obj != nil {
|
||||
|
|
@ -349,7 +352,14 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind,
|
|||
return obj, &gvk, nil
|
||||
}
|
||||
|
||||
func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
|
||||
func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
|
||||
if co, ok := obj.(runtime.CacheableObject); ok {
|
||||
return co.CacheEncode(s.Identifier(), s.doEncode, w)
|
||||
}
|
||||
return s.doEncode(obj, w)
|
||||
}
|
||||
|
||||
func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error {
|
||||
switch t := obj.(type) {
|
||||
case *Unstructured:
|
||||
return json.NewEncoder(w).Encode(t.Object)
|
||||
|
|
@ -373,6 +383,11 @@ func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (unstructuredJSONScheme) Identifier() runtime.Identifier {
|
||||
return unstructuredJSONSchemeIdentifier
|
||||
}
|
||||
|
||||
func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
|
||||
type detector struct {
|
||||
Items gojson.RawMessage
|
||||
|
|
@ -400,12 +415,6 @@ func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) erro
|
|||
return s.decodeToUnstructured(data, x)
|
||||
case *UnstructuredList:
|
||||
return s.decodeToList(data, x)
|
||||
case *runtime.VersionedObjects:
|
||||
o, err := s.decode(data)
|
||||
if err == nil {
|
||||
x.Objects = []runtime.Object{o}
|
||||
}
|
||||
return err
|
||||
default:
|
||||
return json.Unmarshal(data, x)
|
||||
}
|
||||
|
|
@ -460,12 +469,30 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList
|
|||
return nil
|
||||
}
|
||||
|
||||
type JSONFallbackEncoder struct {
|
||||
runtime.Encoder
|
||||
type jsonFallbackEncoder struct {
|
||||
encoder runtime.Encoder
|
||||
identifier runtime.Identifier
|
||||
}
|
||||
|
||||
func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
|
||||
err := c.Encoder.Encode(obj, w)
|
||||
func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder {
|
||||
result := map[string]string{
|
||||
"name": "fallback",
|
||||
"base": string(encoder.Identifier()),
|
||||
}
|
||||
identifier, err := gojson.Marshal(result)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err)
|
||||
}
|
||||
return &jsonFallbackEncoder{
|
||||
encoder: encoder,
|
||||
identifier: runtime.Identifier(identifier),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
|
||||
// There is no need to handle runtime.CacheableObject, as we only
|
||||
// fallback to other encoders here.
|
||||
err := c.encoder.Encode(obj, w)
|
||||
if runtime.IsNotRegisteredError(err) {
|
||||
switch obj.(type) {
|
||||
case *Unstructured, *UnstructuredList:
|
||||
|
|
@ -474,3 +501,8 @@ func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
|
|||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (c *jsonFallbackEncoder) Identifier() runtime.Identifier {
|
||||
return c.identifier
|
||||
}
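
Editor's note: the hunks above replace the exported JSONFallbackEncoder struct with a NewJSONFallbackEncoder constructor that also derives a stable encoder identifier. A minimal sketch of how a caller wraps an encoder with it follows; only the apimachinery identifiers shown in the diff are real, the scheme/serializer wiring and object values are illustrative.

// Sketch (not part of the vendored diff): wrapping a JSON serializer so that
// *Unstructured objects fall back to plain JSON encoding.
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	// A plain JSON serializer; any runtime.Encoder exposing Identifier() works here.
	base := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})

	// NewJSONFallbackEncoder replaces the removed exported JSONFallbackEncoder struct
	// and computes its Identifier from the wrapped encoder.
	enc := unstructured.NewJSONFallbackEncoder(base)

	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
	}}
	_ = enc.Encode(u, os.Stdout)
}
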
523 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go generated vendored Normal file
@@ -0,0 +1,523 @@
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by conversion-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
url "net/url"
|
||||
unsafe "unsafe"
|
||||
|
||||
resource "k8s.io/apimachinery/pkg/api/resource"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
fields "k8s.io/apimachinery/pkg/fields"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
intstr "k8s.io/apimachinery/pkg/util/intstr"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
|
||||
out.DryRun = *(*[]string)(unsafe.Pointer(&values))
|
||||
} else {
|
||||
out.DryRun = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.FieldManager = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_CreateOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.GracePeriodSeconds = nil
|
||||
}
|
||||
// INFO: in.Preconditions opted out of conversion generation
|
||||
if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.OrphanDependents = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 {
|
||||
if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PropagationPolicy = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
|
||||
out.DryRun = *(*[]string)(unsafe.Pointer(&values))
|
||||
} else {
|
||||
out.DryRun = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Export = false
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Exact = false
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_ExportOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.ResourceVersion = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_GetOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.LabelSelector = ""
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.FieldSelector = ""
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Watch = false
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.AllowWatchBookmarks = false
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.ResourceVersion = ""
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.TimeoutSeconds = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Limit = 0
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Continue = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_ListOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
|
||||
out.DryRun = *(*[]string)(unsafe.Pointer(&values))
|
||||
} else {
|
||||
out.DryRun = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.Force = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.FieldManager = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_PatchOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.NoHeaders = false
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 {
|
||||
if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.IncludeObject = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_TableOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
|
||||
out.DryRun = *(*[]string)(unsafe.Pointer(&values))
|
||||
} else {
|
||||
out.DryRun = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.FieldManager = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function.
|
||||
func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error {
|
||||
return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s)
|
||||
}
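
Editor's note: these generated Convert_url_Values_To_v1_* functions are what runtime.ParameterCodec dispatches to when decoding request query parameters. A minimal sketch of that path follows; the helper name, group/version, and query values are illustrative, only the apimachinery APIs are real.

// Sketch (illustrative helper): decoding url.Values into metav1.ListOptions via
// the generated conversions registered above.
package paramsketch

import (
	"net/url"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func parseListOptions(query url.Values) (metav1.ListOptions, error) {
	scheme := runtime.NewScheme()
	// Registers ListOptions and friends, including their url.Values conversions.
	metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
	codec := runtime.NewParameterCodec(scheme)

	var opts metav1.ListOptions
	// DecodeParameters ends up calling Convert_url_Values_To_v1_ListOptions.
	err := codec.DecodeParameters(query, schema.GroupVersion{Version: "v1"}, &opts)
	return opts, err
}
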
@@ -19,6 +19,7 @@ package v1beta1
|
|||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
// GroupName is the group name for this API.
|
||||
|
|
@ -38,12 +39,7 @@ var scheme = runtime.NewScheme()
|
|||
// ParameterCodec knows about query parameters used with the meta v1beta1 API spec.
|
||||
var ParameterCodec = runtime.NewParameterCodec(scheme)
|
||||
|
||||
func init() {
|
||||
if err := AddMetaToScheme(scheme); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// AddMetaToScheme registers base meta types into schemas.
|
||||
func AddMetaToScheme(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&Table{},
|
||||
|
|
@ -55,7 +51,11 @@ func AddMetaToScheme(scheme *runtime.Scheme) error {
|
|||
return scheme.AddConversionFuncs(
|
||||
Convert_Slice_string_To_v1beta1_IncludeObjectPolicy,
|
||||
)
|
||||
}
|
||||
|
||||
func init() {
|
||||
utilruntime.Must(AddMetaToScheme(scheme))
|
||||
|
||||
// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
|
||||
//scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...)
|
||||
utilruntime.Must(RegisterDefaults(scheme))
|
||||
}
@@ -54,6 +54,11 @@ type Selector interface {
|
|||
|
||||
// Make a deep copy of the selector.
|
||||
DeepCopySelector() Selector
|
||||
|
||||
// RequiresExactMatch allows a caller to introspect whether a given selector
|
||||
// requires a single specific label to be set, and if so returns the value it
|
||||
// requires.
|
||||
RequiresExactMatch(label string) (value string, found bool)
|
||||
}
|
||||
|
||||
// Everything returns a selector that matches all labels.
|
||||
|
|
@ -63,12 +68,13 @@ func Everything() Selector {
|
|||
|
||||
type nothingSelector struct{}
|
||||
|
||||
func (n nothingSelector) Matches(_ Labels) bool { return false }
|
||||
func (n nothingSelector) Empty() bool { return false }
|
||||
func (n nothingSelector) String() string { return "" }
|
||||
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
|
||||
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
|
||||
func (n nothingSelector) DeepCopySelector() Selector { return n }
|
||||
func (n nothingSelector) Matches(_ Labels) bool { return false }
|
||||
func (n nothingSelector) Empty() bool { return false }
|
||||
func (n nothingSelector) String() string { return "" }
|
||||
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
|
||||
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
|
||||
func (n nothingSelector) DeepCopySelector() Selector { return n }
|
||||
func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false }
|
||||
|
||||
// Nothing returns a selector that matches no labels
|
||||
func Nothing() Selector {
|
||||
|
|
@ -358,6 +364,23 @@ func (lsel internalSelector) String() string {
|
|||
return strings.Join(reqs, ",")
|
||||
}
|
||||
|
||||
// RequiresExactMatch introspect whether a given selector requires a single specific field
|
||||
// to be set, and if so returns the value it requires.
|
||||
func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) {
|
||||
for ix := range lsel {
|
||||
if lsel[ix].key == label {
|
||||
switch lsel[ix].operator {
|
||||
case selection.Equals, selection.DoubleEquals, selection.In:
|
||||
if len(lsel[ix].strValues) == 1 {
|
||||
return lsel[ix].strValues[0], true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Token represents constant definition for lexer token
|
||||
type Token int
|
||||
|
||||
|
|
@ -850,7 +873,7 @@ func SelectorFromSet(ls Set) Selector {
|
|||
if ls == nil || len(ls) == 0 {
|
||||
return internalSelector{}
|
||||
}
|
||||
var requirements internalSelector
|
||||
requirements := make([]Requirement, 0, len(ls))
|
||||
for label, value := range ls {
|
||||
r, err := NewRequirement(label, selection.Equals, []string{value})
|
||||
if err == nil {
|
||||
|
|
@ -862,7 +885,7 @@ func SelectorFromSet(ls Set) Selector {
|
|||
}
|
||||
// sort to have deterministic string representation
|
||||
sort.Sort(ByKey(requirements))
|
||||
return requirements
|
||||
return internalSelector(requirements)
|
||||
}
|
||||
|
||||
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
|
||||
|
|
@ -872,13 +895,13 @@ func SelectorFromValidatedSet(ls Set) Selector {
|
|||
if ls == nil || len(ls) == 0 {
|
||||
return internalSelector{}
|
||||
}
|
||||
var requirements internalSelector
|
||||
requirements := make([]Requirement, 0, len(ls))
|
||||
for label, value := range ls {
|
||||
requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})
|
||||
}
|
||||
// sort to have deterministic string representation
|
||||
sort.Sort(ByKey(requirements))
|
||||
return requirements
|
||||
return internalSelector(requirements)
|
||||
}
|
||||
|
||||
// ParseToRequirements takes a string representing a selector and returns a list of
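
Editor's note: the RequiresExactMatch method added to labels.Selector in the hunks above can be exercised as follows. A minimal sketch; the label key and values are illustrative.

// Sketch (illustrative values): using the new Selector.RequiresExactMatch.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel := labels.SelectorFromSet(labels.Set{"app": "demo"})

	// Reports whether the selector pins the given label to exactly one value.
	if value, found := sel.RequiresExactMatch("app"); found {
		fmt.Println("selector requires app =", value)
	}
	fmt.Println("matches:", sel.Matches(labels.Set{"app": "demo"}))
}
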
@@ -19,13 +19,17 @@ package runtime
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/conversion/queryparams"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// codec binds an encoder and decoder.
|
||||
|
|
@ -100,10 +104,19 @@ type NoopEncoder struct {
|
|||
|
||||
var _ Serializer = NoopEncoder{}
|
||||
|
||||
const noopEncoderIdentifier Identifier = "noop"
|
||||
|
||||
func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
|
||||
// There is no need to handle runtime.CacheableObject, as we don't
|
||||
// process the obj at all.
|
||||
return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (n NoopEncoder) Identifier() Identifier {
|
||||
return noopEncoderIdentifier
|
||||
}
|
||||
|
||||
// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
|
||||
type NoopDecoder struct {
|
||||
Encoder
|
||||
|
|
@ -193,19 +206,51 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u
|
|||
type base64Serializer struct {
|
||||
Encoder
|
||||
Decoder
|
||||
|
||||
identifier Identifier
|
||||
}
|
||||
|
||||
func NewBase64Serializer(e Encoder, d Decoder) Serializer {
|
||||
return &base64Serializer{e, d}
|
||||
return &base64Serializer{
|
||||
Encoder: e,
|
||||
Decoder: d,
|
||||
identifier: identifier(e),
|
||||
}
|
||||
}
|
||||
|
||||
func identifier(e Encoder) Identifier {
|
||||
result := map[string]string{
|
||||
"name": "base64",
|
||||
}
|
||||
if e != nil {
|
||||
result["encoder"] = string(e.Identifier())
|
||||
}
|
||||
identifier, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err)
|
||||
}
|
||||
return Identifier(identifier)
|
||||
}
|
||||
|
||||
func (s base64Serializer) Encode(obj Object, stream io.Writer) error {
|
||||
if co, ok := obj.(CacheableObject); ok {
|
||||
return co.CacheEncode(s.Identifier(), s.doEncode, stream)
|
||||
}
|
||||
return s.doEncode(obj, stream)
|
||||
}
|
||||
|
||||
func (s base64Serializer) doEncode(obj Object, stream io.Writer) error {
|
||||
e := base64.NewEncoder(base64.StdEncoding, stream)
|
||||
err := s.Encoder.Encode(obj, e)
|
||||
e.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (s base64Serializer) Identifier() Identifier {
|
||||
return s.identifier
|
||||
}
|
||||
|
||||
func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
|
||||
out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
|
||||
n, err := base64.StdEncoding.Decode(out, data)
|
||||
|
|
@ -238,6 +283,11 @@ var (
|
|||
DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}
|
||||
)
|
||||
|
||||
const (
|
||||
internalGroupVersionerIdentifier = "internal"
|
||||
disabledGroupVersionerIdentifier = "disabled"
|
||||
)
|
||||
|
||||
type internalGroupVersioner struct{}
|
||||
|
||||
// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.
|
||||
|
|
@ -253,6 +303,11 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi
|
|||
return schema.GroupVersionKind{}, false
|
||||
}
|
||||
|
||||
// Identifier implements GroupVersioner interface.
|
||||
func (internalGroupVersioner) Identifier() string {
|
||||
return internalGroupVersionerIdentifier
|
||||
}
|
||||
|
||||
type disabledGroupVersioner struct{}
|
||||
|
||||
// KindForGroupVersionKinds returns false for any input.
|
||||
|
|
@ -260,19 +315,9 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi
|
|||
return schema.GroupVersionKind{}, false
|
||||
}
|
||||
|
||||
// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind.
|
||||
type GroupVersioners []GroupVersioner
|
||||
|
||||
// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred.
|
||||
func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
|
||||
for _, gv := range gvs {
|
||||
target, ok := gv.KindForGroupVersionKinds(kinds)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
return target, true
|
||||
}
|
||||
return schema.GroupVersionKind{}, false
|
||||
// Identifier implements GroupVersioner interface.
|
||||
func (disabledGroupVersioner) Identifier() string {
|
||||
return disabledGroupVersionerIdentifier
|
||||
}
|
||||
|
||||
// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner
|
||||
|
|
@ -330,3 +375,22 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio
|
|||
}
|
||||
return schema.GroupVersionKind{}, false
|
||||
}
|
||||
|
||||
// Identifier implements GroupVersioner interface.
|
||||
func (v multiGroupVersioner) Identifier() string {
|
||||
groupKinds := make([]string, 0, len(v.acceptedGroupKinds))
|
||||
for _, gk := range v.acceptedGroupKinds {
|
||||
groupKinds = append(groupKinds, gk.String())
|
||||
}
|
||||
result := map[string]string{
|
||||
"name": "multi",
|
||||
"target": v.target.String(),
|
||||
"accepted": strings.Join(groupKinds, ","),
|
||||
"coerce": strconv.FormatBool(v.coerce),
|
||||
}
|
||||
identifier, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err)
|
||||
}
|
||||
return string(identifier)
|
||||
}
@@ -61,19 +61,21 @@ var DefaultStringConversions = []interface{}{
|
|||
Convert_Slice_string_To_int64,
|
||||
}
|
||||
|
||||
func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
|
||||
if len(*input) == 0 {
|
||||
func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
*out = ""
|
||||
return nil
|
||||
}
|
||||
*out = (*input)[0]
|
||||
*out = (*in)[0]
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
|
||||
if len(*input) == 0 {
|
||||
func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
*out = 0
|
||||
return nil
|
||||
}
|
||||
str := (*input)[0]
|
||||
str := (*in)[0]
|
||||
i, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -83,15 +85,16 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope)
|
|||
}
|
||||
|
||||
// Convert_Slice_string_To_bool will convert a string parameter to boolean.
|
||||
// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
|
||||
// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
|
||||
// value of "0" resolve to false.
|
||||
// Any other value (including empty string) resolves to true.
|
||||
func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
|
||||
if len(*input) == 0 {
|
||||
func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
*out = false
|
||||
return nil
|
||||
}
|
||||
switch strings.ToLower((*input)[0]) {
|
||||
case "false", "0":
|
||||
switch {
|
||||
case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
|
||||
*out = false
|
||||
default:
|
||||
*out = true
|
||||
|
|
@ -99,15 +102,79 @@ func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope
|
|||
return nil
|
||||
}
|
||||
|
||||
func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error {
|
||||
if len(*input) == 0 {
|
||||
*out = 0
|
||||
// Convert_Slice_string_To_bool will convert a string parameter to boolean.
|
||||
// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
|
||||
// value of "0" resolve to false.
|
||||
// Any other value (including empty string) resolves to true.
|
||||
func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
boolVar := false
|
||||
*out = &boolVar
|
||||
return nil
|
||||
}
|
||||
str := (*input)[0]
|
||||
i, err := strconv.ParseInt(str, 10, 64)
|
||||
switch {
|
||||
case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
|
||||
boolVar := false
|
||||
*out = &boolVar
|
||||
default:
|
||||
boolVar := true
|
||||
*out = &boolVar
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func string_to_int64(in string) (int64, error) {
|
||||
return strconv.ParseInt(in, 10, 64)
|
||||
}
|
||||
|
||||
func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error {
|
||||
if in == nil {
|
||||
*out = 0
|
||||
return nil
|
||||
}
|
||||
i, err := string_to_int64(*in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*out = i
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
*out = 0
|
||||
return nil
|
||||
}
|
||||
i, err := string_to_int64((*in)[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*out = i
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error {
|
||||
if in == nil {
|
||||
*out = nil
|
||||
return nil
|
||||
}
|
||||
i, err := string_to_int64(*in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*out = &i
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error {
|
||||
if len(*in) == 0 {
|
||||
*out = nil
|
||||
return nil
|
||||
}
|
||||
i, err := string_to_int64((*in)[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*out = &i
|
||||
return nil
|
||||
}
@@ -37,13 +37,36 @@ type GroupVersioner interface {
|
|||
// Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type.
|
||||
// Sophisticated implementations may use additional information about the input kinds to pick a destination kind.
|
||||
KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool)
|
||||
// Identifier returns string representation of the object.
|
||||
// Identifiers of two different encoders should be equal only if for every input
|
||||
// kinds they return the same result.
|
||||
Identifier() string
|
||||
}
|
||||
|
||||
// Identifier represents an identifier.
|
||||
// Identitier of two different objects should be equal if and only if for every
|
||||
// input the output they produce is exactly the same.
|
||||
type Identifier string
|
||||
|
||||
// Encoder writes objects to a serialized form
|
||||
type Encoder interface {
|
||||
// Encode writes an object to a stream. Implementations may return errors if the versions are
|
||||
// incompatible, or if no conversion is defined.
|
||||
Encode(obj Object, w io.Writer) error
|
||||
// Identifier returns an identifier of the encoder.
|
||||
// Identifiers of two different encoders should be equal if and only if for every input
|
||||
// object it will be encoded to the same representation by both of them.
|
||||
//
|
||||
// Identifier is inteted for use with CacheableObject#CacheEncode method. In order to
|
||||
// correctly handle CacheableObject, Encode() method should look similar to below, where
|
||||
// doEncode() is the encoding logic of implemented encoder:
|
||||
// func (e *MyEncoder) Encode(obj Object, w io.Writer) error {
|
||||
// if co, ok := obj.(CacheableObject); ok {
|
||||
// return co.CacheEncode(e.Identifier(), e.doEncode, w)
|
||||
// }
|
||||
// return e.doEncode(obj, w)
|
||||
// }
|
||||
Identifier() Identifier
|
||||
}
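
Editor's note: the Encoder interface comment above prescribes how Encode should cooperate with CacheableObject. A minimal sketch of a conforming encoder follows; the type name and JSON body are illustrative, only the runtime interfaces come from the diff.

// Sketch (illustrative encoder): the Encode/CacheEncode pattern documented above.
package encodersketch

import (
	"encoding/json"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

type myEncoder struct{}

var _ runtime.Encoder = myEncoder{}

func (e myEncoder) Encode(obj runtime.Object, w io.Writer) error {
	// Let cacheable objects reuse a prior encoding keyed by this encoder's identifier.
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(e.Identifier(), e.doEncode, w)
	}
	return e.doEncode(obj, w)
}

// doEncode holds the actual encoding logic; here a plain JSON marshal.
func (e myEncoder) doEncode(obj runtime.Object, w io.Writer) error {
	return json.NewEncoder(w).Encode(obj)
}

// Identifier implements runtime.Encoder.
func (e myEncoder) Identifier() runtime.Identifier {
	return runtime.Identifier("example-json")
}
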
|
||||
|
||||
// Decoder attempts to load an object from data.
|
||||
|
|
@ -132,6 +155,28 @@ type NegotiatedSerializer interface {
|
|||
DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
|
||||
}
|
||||
|
||||
// ClientNegotiator handles turning an HTTP content type into the appropriate encoder.
|
||||
// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from
|
||||
// a NegotiatedSerializer.
|
||||
type ClientNegotiator interface {
|
||||
// Encoder returns the appropriate encoder for the provided contentType (e.g. application/json)
|
||||
// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
|
||||
// a NegotiateError will be returned. The current client implementations consider params to be
|
||||
// optional modifiers to the contentType and will ignore unrecognized parameters.
|
||||
Encoder(contentType string, params map[string]string) (Encoder, error)
|
||||
// Decoder returns the appropriate decoder for the provided contentType (e.g. application/json)
|
||||
// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
|
||||
// a NegotiateError will be returned. The current client implementations consider params to be
|
||||
// optional modifiers to the contentType and will ignore unrecognized parameters.
|
||||
Decoder(contentType string, params map[string]string) (Decoder, error)
|
||||
// StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g.
|
||||
// application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no
|
||||
// serializer is found a NegotiateError will be returned. The Serializer and Framer will always
|
||||
// be returned if a Decoder is returned. The current client implementations consider params to be
|
||||
// optional modifiers to the contentType and will ignore unrecognized parameters.
|
||||
StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error)
|
||||
}
|
||||
|
||||
// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers
|
||||
// that can read and write data at rest. This would commonly be used by client tools that must
|
||||
// read files, or server side storage interfaces that persist restful objects.
|
||||
|
|
@ -256,6 +301,27 @@ type Object interface {
|
|||
DeepCopyObject() Object
|
||||
}
|
||||
|
||||
// CacheableObject allows an object to cache its different serializations
|
||||
// to avoid performing the same serialization multiple times.
|
||||
type CacheableObject interface {
|
||||
// CacheEncode writes an object to a stream. The <encode> function will
|
||||
// be used in case of cache miss. The <encode> function takes ownership
|
||||
// of the object.
|
||||
// If CacheableObject is a wrapper, then deep-copy of the wrapped object
|
||||
// should be passed to <encode> function.
|
||||
// CacheEncode assumes that for two different calls with the same <id>,
|
||||
// <encode> function will also be the same.
|
||||
CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error
|
||||
// GetObject returns a deep-copy of an object to be encoded - the caller of
|
||||
// GetObject() is the owner of returned object. The reason for making a copy
|
||||
// is to avoid bugs, where caller modifies the object and forgets to copy it,
|
||||
// thus modifying the object for everyone.
|
||||
// The object returned by GetObject should be the same as the one that is supposed
|
||||
// to be passed to <encode> function in CacheEncode method.
|
||||
// If CacheableObject is a wrapper, the copy of wrapped object should be returned.
|
||||
GetObject() Object
|
||||
}
|
||||
|
||||
// Unstructured objects store values as map[string]interface{}, with only values that can be serialized
|
||||
// to JSON allowed.
|
||||
type Unstructured interface {
|
@@ -0,0 +1,146 @@
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// NegotiateError is returned when a ClientNegotiator is unable to locate
|
||||
// a serializer for the requested operation.
|
||||
type NegotiateError struct {
|
||||
ContentType string
|
||||
Stream bool
|
||||
}
|
||||
|
||||
func (e NegotiateError) Error() string {
|
||||
if e.Stream {
|
||||
return fmt.Sprintf("no stream serializers registered for %s", e.ContentType)
|
||||
}
|
||||
return fmt.Sprintf("no serializers registered for %s", e.ContentType)
|
||||
}
|
||||
|
||||
type clientNegotiator struct {
|
||||
serializer NegotiatedSerializer
|
||||
encode, decode GroupVersioner
|
||||
}
|
||||
|
||||
func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) {
|
||||
// TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method
|
||||
// if client negotiators truly need to use it
|
||||
mediaTypes := n.serializer.SupportedMediaTypes()
|
||||
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
|
||||
if !ok {
|
||||
if len(contentType) != 0 || len(mediaTypes) == 0 {
|
||||
return nil, NegotiateError{ContentType: contentType}
|
||||
}
|
||||
info = mediaTypes[0]
|
||||
}
|
||||
return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil
|
||||
}
|
||||
|
||||
func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) {
|
||||
mediaTypes := n.serializer.SupportedMediaTypes()
|
||||
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
|
||||
if !ok {
|
||||
if len(contentType) != 0 || len(mediaTypes) == 0 {
|
||||
return nil, NegotiateError{ContentType: contentType}
|
||||
}
|
||||
info = mediaTypes[0]
|
||||
}
|
||||
return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil
|
||||
}
|
||||
|
||||
func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) {
|
||||
mediaTypes := n.serializer.SupportedMediaTypes()
|
||||
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
|
||||
if !ok {
|
||||
if len(contentType) != 0 || len(mediaTypes) == 0 {
|
||||
return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true}
|
||||
}
|
||||
info = mediaTypes[0]
|
||||
}
|
||||
if info.StreamSerializer == nil {
|
||||
return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true}
|
||||
}
|
||||
return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil
|
||||
}
|
||||
|
||||
// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or
|
||||
// stream decoder for a given content type. Does not perform any conversion, but will
|
||||
// encode the object to the desired group, version, and kind. Use when creating a client.
|
||||
func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
|
||||
return &clientNegotiator{
|
||||
serializer: serializer,
|
||||
encode: gv,
|
||||
}
|
||||
}
|
||||
|
||||
// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver
|
||||
// where objects are converted to gv prior to sending and decoded to their internal representation prior
|
||||
// to retrieval.
|
||||
//
|
||||
// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release.
|
||||
func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
|
||||
decode := schema.GroupVersions{
|
||||
{
|
||||
Group: gv.Group,
|
||||
Version: APIVersionInternal,
|
||||
},
|
||||
// always include the legacy group as a decoding target to handle non-error `Status` return types
|
||||
{
|
||||
Group: "",
|
||||
Version: APIVersionInternal,
|
||||
},
|
||||
}
|
||||
return &clientNegotiator{
|
||||
encode: gv,
|
||||
decode: decode,
|
||||
serializer: serializer,
|
||||
}
|
||||
}
|
||||
|
||||
// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used
|
||||
// for testing or when the caller is taking responsibility for setting the GVK on encoded objects.
|
||||
func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator {
|
||||
return &clientNegotiator{
|
||||
serializer: &simpleNegotiatedSerializer{info: info},
|
||||
encode: gv,
|
||||
}
|
||||
}
|
||||
|
||||
type simpleNegotiatedSerializer struct {
|
||||
info SerializerInfo
|
||||
}
|
||||
|
||||
func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer {
|
||||
return &simpleNegotiatedSerializer{info: info}
|
||||
}
|
||||
|
||||
func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo {
|
||||
return []SerializerInfo{n.info}
|
||||
}
|
||||
|
||||
func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder {
|
||||
return e
|
||||
}
|
||||
|
||||
func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder {
|
||||
return d
|
||||
}
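
Editor's note: for context on the ClientNegotiator added in the new negotiate.go above, here is a minimal sketch of negotiating a JSON encoder from a scheme-backed codec factory; the helper name and group/version are illustrative.

// Sketch (illustrative helper): resolving an encoder via runtime.NewClientNegotiator.
package negotiatesketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// jsonEncoderFor returns the encoder negotiated for application/json, targeting core v1.
func jsonEncoderFor(scheme *runtime.Scheme) (runtime.Encoder, error) {
	codecs := serializer.NewCodecFactory(scheme)
	negotiator := runtime.NewClientNegotiator(codecs.WithoutConversion(), schema.GroupVersion{Version: "v1"})
	return negotiator.Encoder("application/json", nil)
}
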
@@ -29,33 +29,3 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
|
|||
}
|
||||
|
||||
func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
|
||||
|
||||
// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind
|
||||
// interface if no objects are provided, or the ObjectKind interface of the object in the
|
||||
// highest array position.
|
||||
func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind {
|
||||
last := obj.Last()
|
||||
if last == nil {
|
||||
return schema.EmptyObjectKind
|
||||
}
|
||||
return last.GetObjectKind()
|
||||
}
|
||||
|
||||
// First returns the leftmost object in the VersionedObjects array, which is usually the
|
||||
// object as serialized on the wire.
|
||||
func (obj *VersionedObjects) First() Object {
|
||||
if len(obj.Objects) == 0 {
|
||||
return nil
|
||||
}
|
||||
return obj.Objects[0]
|
||||
}
|
||||
|
||||
// Last is the rightmost object in the VersionedObjects array, which is the object after
|
||||
// all transformations have been applied. This is the same object that would be returned
|
||||
// by Decode in a normal invocation (without VersionedObjects in the into argument).
|
||||
func (obj *VersionedObjects) Last() Object {
|
||||
if len(obj.Objects) == 0 {
|
||||
return nil
|
||||
}
|
||||
return obj.Objects[len(obj.Objects)-1]
|
||||
}
@@ -191,6 +191,11 @@ func (gv GroupVersion) String() string {
|
|||
return gv.Version
|
||||
}
|
||||
|
||||
// Identifier implements runtime.GroupVersioner interface.
|
||||
func (gv GroupVersion) Identifier() string {
|
||||
return gv.String()
|
||||
}
|
||||
|
||||
// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
|
||||
// if none of the options match the group. It prefers a match to group and version over just group.
|
||||
// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
|
||||
|
|
@ -246,6 +251,15 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
|
|||
// in fewer places.
|
||||
type GroupVersions []GroupVersion
|
||||
|
||||
// Identifier implements runtime.GroupVersioner interface.
|
||||
func (gv GroupVersions) Identifier() string {
|
||||
groupVersions := make([]string, 0, len(gv))
|
||||
for i := range gv {
|
||||
groupVersions = append(groupVersions, gv[i].String())
|
||||
}
|
||||
return fmt.Sprintf("[%s]", strings.Join(groupVersions, ","))
|
||||
}
|
||||
|
||||
// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
|
||||
// if none of the options match the group.
|
||||
func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) {
@@ -31,6 +31,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
|
||||
"k8s.io/apimachinery/pkg/util/framer"
|
||||
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
|
||||
|
|
@@ -53,13 +54,28 @@ func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer ru
// and are immutable.
func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
	return &Serializer{
		meta:       meta,
		creater:    creater,
		typer:      typer,
		options:    options,
		identifier: identifier(options),
	}
}

// identifier computes Identifier of Encoder based on the given options.
func identifier(options SerializerOptions) runtime.Identifier {
	result := map[string]string{
		"name":   "json",
		"yaml":   strconv.FormatBool(options.Yaml),
		"pretty": strconv.FormatBool(options.Pretty),
	}
	identifier, err := json.Marshal(result)
	if err != nil {
		klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
	}
	return runtime.Identifier(identifier)
}

// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
// example:
// (1) To configure a JSON serializer, set `Yaml` to `false`.
@@ -85,6 +101,8 @@ type Serializer struct {
	options SerializerOptions
	creater runtime.ObjectCreater
	typer   runtime.ObjectTyper

	identifier runtime.Identifier
}

// Serializer implements Serializer
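The computed identifier is just the JSON encoding of that option map. A small standalone sketch that mirrors the computation for a plain JSON serializer (illustrative only, not part of the vendored code):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	// Mirrors the identifier computation above for Yaml=false, Pretty=false;
	// json.Marshal sorts the map keys, so the result is deterministic.
	id, _ := json.Marshal(map[string]string{
		"name":   "json",
		"yaml":   strconv.FormatBool(false),
		"pretty": strconv.FormatBool(false),
	})
	fmt.Println(string(id)) // {"name":"json","pretty":"false","yaml":"false"}
}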
@@ -122,27 +140,7 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
		}
		iter.ReportError("DecodeNumber", err.Error())
	default:
		// init depth, if needed
		if iter.Attachment == nil {
			iter.Attachment = int(1)
		}

		// remember current depth
		originalAttachment := iter.Attachment

		// increment depth before descending
		if i, ok := iter.Attachment.(int); ok {
			iter.Attachment = i + 1
			if i > 10000 {
				iter.ReportError("parse", "exceeded max depth")
				return
			}
		}

		*(*interface{})(ptr) = iter.Read()

		// restore current depth
		iter.Attachment = originalAttachment
	}
}
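The hunk above concerns the depth guard used when decoding arbitrarily nested JSON into interface{}. A simplified, standalone sketch of the same idea (not the jsoniter mechanism itself), for illustration:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// depth returns an error when nested JSON exceeds maxDepth; a simplified
// illustration of a recursion guard, walking the already-decoded value.
func depth(v interface{}, d, maxDepth int) error {
	if d > maxDepth {
		return errors.New("exceeded max depth")
	}
	switch t := v.(type) {
	case map[string]interface{}:
		for _, vv := range t {
			if err := depth(vv, d+1, maxDepth); err != nil {
				return err
			}
		}
	case []interface{}:
		for _, vv := range t {
			if err := depth(vv, d+1, maxDepth); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	var v interface{}
	_ = json.Unmarshal([]byte(`{"a":[[{"b":1}]]}`), &v)
	fmt.Println(depth(v, 1, 10000)) // <nil>
}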
@@ -208,16 +206,6 @@ func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVer
// On success or most errors, the method will return the calculated schema kind.
// The gvk calculate priority will be originalData > default gvk > into
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
	if versioned, ok := into.(*runtime.VersionedObjects); ok {
		into = versioned.Last()
		obj, actual, err := s.Decode(originalData, gvk, into)
		if err != nil {
			return nil, actual, err
		}
		versioned.Objects = []runtime.Object{obj}
		return versioned, actual, nil
	}

	data := originalData
	if s.options.Yaml {
		altered, err := yaml.YAMLToJSON(data)
@@ -306,6 +294,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i

// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(s.Identifier(), s.doEncode, w)
	}
	return s.doEncode(obj, w)
}

func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
	if s.options.Yaml {
		json, err := caseSensitiveJsonIterator.Marshal(obj)
		if err != nil {
@@ -331,6 +326,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
	return encoder.Encode(obj)
}

// Identifier implements runtime.Encoder interface.
func (s *Serializer) Identifier() runtime.Identifier {
	return s.identifier
}

// RecognizesData implements the RecognizingDecoder interface.
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
	if s.options.Yaml {
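Encode now short-circuits through runtime.CacheableObject so callers can reuse previously encoded bytes. A hedged sketch of a hypothetical caching wrapper, assuming the CacheableObject interface shape in the vendored 1.17 runtime package; cachingObject is illustrative and not part of apimachinery:

package main

import (
	"bytes"
	"io"
	"os"
	"sync"

	"k8s.io/apimachinery/pkg/runtime"
)

// cachingObject memoizes encoded bytes per encoder Identifier, so repeated
// Encode calls through the same serializer replay the cached output.
type cachingObject struct {
	obj   runtime.Object
	mu    sync.Mutex
	cache map[runtime.Identifier][]byte
}

var _ runtime.CacheableObject = &cachingObject{} // assumption: interface shape as in 1.17

func (c *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.Object, io.Writer) error, w io.Writer) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if b, ok := c.cache[id]; ok {
		_, err := w.Write(b)
		return err
	}
	var buf bytes.Buffer
	if err := encode(c.obj, &buf); err != nil {
		return err
	}
	if c.cache == nil {
		c.cache = map[runtime.Identifier][]byte{}
	}
	c.cache[id] = buf.Bytes()
	_, err := w.Write(buf.Bytes())
	return err
}

func (c *cachingObject) GetObject() runtime.Object { return c.obj }

func main() {
	co := &cachingObject{obj: &runtime.Unknown{Raw: []byte(`{}`)}}
	// First call populates the cache; a second call with the same id replays it.
	_ = co.CacheEncode("json", func(o runtime.Object, w io.Writer) error {
		_, err := w.Write(o.(*runtime.Unknown).Raw)
		return err
	}, os.Stdout)
}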
@@ -86,6 +86,8 @@ type Serializer struct {
var _ runtime.Serializer = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}

const serializerIdentifier runtime.Identifier = "protobuf"

// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
|
||||
|
|
@ -93,23 +95,6 @@ var _ recognizer.RecognizingDecoder = &Serializer{}
|
|||
// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
|
||||
// errors, the method will return the calculated schema kind.
|
||||
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
|
||||
if versioned, ok := into.(*runtime.VersionedObjects); ok {
|
||||
into = versioned.Last()
|
||||
obj, actual, err := s.Decode(originalData, gvk, into)
|
||||
if err != nil {
|
||||
return nil, actual, err
|
||||
}
|
||||
// the last item in versioned becomes into, so if versioned was not originally empty we reset the object
|
||||
// array so the first position is the decoded object and the second position is the outermost object.
|
||||
// if there were no objects in the versioned list passed to us, only add ourselves.
|
||||
if into != nil && into != obj {
|
||||
versioned.Objects = []runtime.Object{obj, into}
|
||||
} else {
|
||||
versioned.Objects = []runtime.Object{obj}
|
||||
}
|
||||
return versioned, actual, err
|
||||
}
|
||||
|
||||
prefixLen := len(s.prefix)
|
||||
switch {
|
||||
case len(originalData) == 0:
|
||||
|
|
@ -176,6 +161,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
|
|||
|
||||
// Encode serializes the provided object to the given writer.
|
||||
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
|
||||
if co, ok := obj.(runtime.CacheableObject); ok {
|
||||
return co.CacheEncode(s.Identifier(), s.doEncode, w)
|
||||
}
|
||||
return s.doEncode(obj, w)
|
||||
}
|
||||
|
||||
func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
|
||||
prefixSize := uint64(len(s.prefix))
|
||||
|
||||
var unk runtime.Unknown
|
||||
|
|
@ -245,6 +237,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (s *Serializer) Identifier() runtime.Identifier {
|
||||
return serializerIdentifier
|
||||
}
|
||||
|
||||
// RecognizesData implements the RecognizingDecoder interface.
|
||||
func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
|
||||
prefix := make([]byte, 4)
|
||||
|
|
@ -321,6 +318,8 @@ type RawSerializer struct {
|
|||
|
||||
var _ runtime.Serializer = &RawSerializer{}
|
||||
|
||||
const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
|
||||
|
||||
// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
|
||||
// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
|
||||
// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
|
||||
|
|
@ -332,20 +331,6 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
|
|||
return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
|
||||
}
|
||||
|
||||
if versioned, ok := into.(*runtime.VersionedObjects); ok {
|
||||
into = versioned.Last()
|
||||
obj, actual, err := s.Decode(originalData, gvk, into)
|
||||
if err != nil {
|
||||
return nil, actual, err
|
||||
}
|
||||
if into != nil && into != obj {
|
||||
versioned.Objects = []runtime.Object{obj, into}
|
||||
} else {
|
||||
versioned.Objects = []runtime.Object{obj}
|
||||
}
|
||||
return versioned, actual, err
|
||||
}
|
||||
|
||||
if len(originalData) == 0 {
|
||||
// TODO: treat like decoding {} from JSON with defaulting
|
||||
return nil, nil, fmt.Errorf("empty data")
|
||||
|
|
@ -419,6 +404,13 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
|
|||
|
||||
// Encode serializes the provided object to the given writer. Overrides is ignored.
|
||||
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
|
||||
if co, ok := obj.(runtime.CacheableObject); ok {
|
||||
return co.CacheEncode(s.Identifier(), s.doEncode, w)
|
||||
}
|
||||
return s.doEncode(obj, w)
|
||||
}
|
||||
|
||||
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
|
||||
switch t := obj.(type) {
|
||||
case bufferedReverseMarshaller:
|
||||
// this path performs a single allocation during write but requires the caller to implement
|
||||
|
|
@ -460,6 +452,11 @@ func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (s *RawSerializer) Identifier() runtime.Identifier {
|
||||
return rawSerializerIdentifier
|
||||
}
|
||||
|
||||
var LengthDelimitedFramer = lengthDelimitedFramer{}
|
||||
|
||||
type lengthDelimitedFramer struct{}
|
||||
|
|
|
|||
|
|
@@ -17,12 +17,15 @@ limitations under the License.
package versioning

import (
	"encoding/json"
	"io"
	"reflect"
	"sync"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog"
)

// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
@@ -62,6 +65,8 @@ func NewCodec(
		encodeVersion: encodeVersion,
		decodeVersion: decodeVersion,

		identifier: identifier(encodeVersion, encoder),

		originalSchemeName: originalSchemeName,
	}
	return internal
@@ -78,19 +83,47 @@ type codec struct {
	encodeVersion runtime.GroupVersioner
	decodeVersion runtime.GroupVersioner

	identifier runtime.Identifier

	// originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
	originalSchemeName string
}

var identifiersMap sync.Map

type codecIdentifier struct {
	EncodeGV string `json:"encodeGV,omitempty"`
	Encoder  string `json:"encoder,omitempty"`
	Name     string `json:"name,omitempty"`
}

// identifier computes Identifier of Encoder based on codec parameters.
func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
	result := codecIdentifier{
		Name: "versioning",
	}

	if encodeGV != nil {
		result.EncodeGV = encodeGV.Identifier()
	}
	if encoder != nil {
		result.Encoder = string(encoder.Identifier())
	}
	if id, ok := identifiersMap.Load(result); ok {
		return id.(runtime.Identifier)
	}
	identifier, err := json.Marshal(result)
	if err != nil {
		klog.Fatalf("Failed marshaling identifier for codec: %v", err)
	}
	identifiersMap.Store(result, runtime.Identifier(identifier))
	return runtime.Identifier(identifier)
}

// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
// into that matches the serialized version.
func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
	versioned, isVersioned := into.(*runtime.VersionedObjects)
	if isVersioned {
		into = versioned.Last()
	}

	// If the into object is unstructured and expresses an opinion about its group/version,
	// create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
	decodeInto := into
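The codec memoizes its computed identifier in a package-level sync.Map keyed by the comparable codecIdentifier struct, so the JSON marshal only happens once per distinct codec configuration. A minimal standalone sketch of that memoization pattern (the names here are illustrative only):

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type key struct{ EncodeGV, Encoder, Name string }

var ids sync.Map // key -> string

// idFor returns the marshaled identifier for k, computing and caching it on
// first use; later calls with an equal key hit the cache.
func idFor(k key) string {
	if v, ok := ids.Load(k); ok {
		return v.(string)
	}
	b, _ := json.Marshal(k)
	ids.Store(k, string(b))
	return string(b)
}

func main() {
	k := key{EncodeGV: "apps/v1", Name: "versioning"}
	fmt.Println(idFor(k) == idFor(k)) // true; the second call is served from the cache
}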
@@ -115,22 +148,11 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
	if into != nil {
		// perform defaulting if requested
		if c.defaulter != nil {
			// create a copy to ensure defaulting is not applied to the original versioned objects
			if isVersioned {
				versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
			}
			c.defaulter.Default(obj)
		} else {
			if isVersioned {
				versioned.Objects = []runtime.Object{obj}
			}
		}

		// Short-circuit conversion if the into object is same object
		if into == obj {
			if isVersioned {
				return versioned, gvk, nil
			}
			return into, gvk, nil
		}
@@ -138,19 +160,9 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
			return nil, gvk, err
		}

		if isVersioned {
			versioned.Objects = append(versioned.Objects, into)
			return versioned, gvk, nil
		}
		return into, gvk, nil
	}

	// Convert if needed.
	if isVersioned {
		// create a copy, because ConvertToVersion does not guarantee non-mutation of objects
		versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
	}

	// perform defaulting if requested
	if c.defaulter != nil {
		c.defaulter.Default(obj)
@@ -160,18 +172,19 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
	if err != nil {
		return nil, gvk, err
	}
	if isVersioned {
		if versioned.Last() != out {
			versioned.Objects = append(versioned.Objects, out)
		}
		return versioned, gvk, nil
	}
	return out, gvk, nil
}

// Encode ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(c.Identifier(), c.doEncode, w)
	}
	return c.doEncode(obj, w)
}

func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
	switch obj := obj.(type) {
	case *runtime.Unknown:
		return c.encoder.Encode(obj, w)
@@ -230,3 +243,8 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
	// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
	return c.encoder.Encode(out, w)
}

// Identifier implements runtime.Encoder interface.
func (c *codec) Identifier() runtime.Identifier {
	return c.identifier
}
@@ -124,16 +124,3 @@ type Unknown struct {
	// Unspecified means ContentTypeJSON.
	ContentType string `protobuf:"bytes,4,opt,name=contentType"`
}

// VersionedObjects is used by Decoders to give callers a way to access all versions
// of an object during the decoding process.
//
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:deepcopy-gen=true
type VersionedObjects struct {
	// Objects is the set of objects retrieved during decoding, in order of conversion.
	// The 0 index is the object as serialized on the wire. If conversion has occurred,
	// other objects may be present. The right most object is the same as would be returned
	// by a normal Decode call.
	Objects []Object
}
|
||||
|
|
|
|||
|
|
@ -73,36 +73,3 @@ func (in *Unknown) DeepCopyObject() Object {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) {
|
||||
*out = *in
|
||||
if in.Objects != nil {
|
||||
in, out := &in.Objects, &out.Objects
|
||||
*out = make([]Object, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] != nil {
|
||||
(*out)[i] = (*in)[i].DeepCopyObject()
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects.
|
||||
func (in *VersionedObjects) DeepCopy() *VersionedObjects {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VersionedObjects)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object.
|
||||
func (in *VersionedObjects) DeepCopyObject() Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,83 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"sync"
)

const (
	shardsCount int = 32
)

type Cache []*cacheShard

func NewCache(maxSize int) Cache {
	if maxSize < shardsCount {
		maxSize = shardsCount
	}
	cache := make(Cache, shardsCount)
	for i := 0; i < shardsCount; i++ {
		cache[i] = &cacheShard{
			items:   make(map[uint64]interface{}),
			maxSize: maxSize / shardsCount,
		}
	}
	return cache
}

func (c Cache) getShard(index uint64) *cacheShard {
	return c[index%uint64(shardsCount)]
}

// Returns true if object already existed, false otherwise.
func (c *Cache) Add(index uint64, obj interface{}) bool {
	return c.getShard(index).add(index, obj)
}

func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
	return c.getShard(index).get(index)
}

type cacheShard struct {
	items map[uint64]interface{}
	sync.RWMutex
	maxSize int
}

// Returns true if object already existed, false otherwise.
func (s *cacheShard) add(index uint64, obj interface{}) bool {
	s.Lock()
	defer s.Unlock()
	_, isOverwrite := s.items[index]
	if !isOverwrite && len(s.items) >= s.maxSize {
		var randomKey uint64
		for randomKey = range s.items {
			break
		}
		delete(s.items, randomKey)
	}
	s.items[index] = obj
	return isOverwrite
}

func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
	s.RLock()
	defer s.RUnlock()
	obj, found = s.items[index]
	return
}
@@ -0,0 +1,192 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"container/heap"
	"sync"
	"time"

	utilclock "k8s.io/apimachinery/pkg/util/clock"
)

// NewExpiring returns an initialized expiring cache.
func NewExpiring() *Expiring {
	return NewExpiringWithClock(utilclock.RealClock{})
}

// NewExpiringWithClock is like NewExpiring but allows passing in a custom
// clock for testing.
func NewExpiringWithClock(clock utilclock.Clock) *Expiring {
	return &Expiring{
		clock: clock,
		cache: make(map[interface{}]entry),
	}
}

// Expiring is a map whose entries expire after a per-entry timeout.
type Expiring struct {
	clock utilclock.Clock

	// mu protects the below fields
	mu sync.RWMutex
	// cache is the internal map that backs the cache.
	cache map[interface{}]entry
	// generation is used as a cheap resource version for cache entries. Cleanups
	// are scheduled with a key and generation. When the cleanup runs, it first
	// compares its generation with the current generation of the entry. It
	// deletes the entry iff the generation matches. This prevents cleanups
	// scheduled for earlier versions of an entry from deleting later versions of
	// an entry when Set() is called multiple times with the same key.
	//
	// The integer value of the generation of an entry is meaningless.
	generation uint64

	heap expiringHeap
}

type entry struct {
	val        interface{}
	expiry     time.Time
	generation uint64
}

// Get looks up an entry in the cache.
func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.cache[key]
	if !ok || !c.clock.Now().Before(e.expiry) {
		return nil, false
	}
	return e.val, true
}

// Set sets a key/value/expiry entry in the map, overwriting any previous entry
// with the same key. The entry expires at the given expiry time, but its TTL
// may be lengthened or shortened by additional calls to Set(). Garbage
// collection of expired entries occurs during calls to Set(), however calls to
// Get() will not return expired entries that have not yet been garbage
// collected.
func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) {
	now := c.clock.Now()
	expiry := now.Add(ttl)

	c.mu.Lock()
	defer c.mu.Unlock()

	c.generation++

	c.cache[key] = entry{
		val:        val,
		expiry:     expiry,
		generation: c.generation,
	}

	// Run GC inline before pushing the new entry.
	c.gc(now)

	heap.Push(&c.heap, &expiringHeapEntry{
		key:        key,
		expiry:     expiry,
		generation: c.generation,
	})
}

// Delete deletes an entry in the map.
func (c *Expiring) Delete(key interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.del(key, 0)
}

// del deletes the entry for the given key. The generation argument is the
// generation of the entry that should be deleted. If the generation has been
// changed (e.g. if a set has occurred on an existing element but the old
// cleanup still runs), this is a noop. If the generation argument is 0, the
// entry's generation is ignored and the entry is deleted.
//
// del must be called under the write lock.
func (c *Expiring) del(key interface{}, generation uint64) {
	e, ok := c.cache[key]
	if !ok {
		return
	}
	if generation != 0 && generation != e.generation {
		return
	}
	delete(c.cache, key)
}

// Len returns the number of items in the cache.
func (c *Expiring) Len() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return len(c.cache)
}

func (c *Expiring) gc(now time.Time) {
	for {
		// Return from gc if the heap is empty or the next element is not yet
		// expired.
		//
		// heap[0] is a peek at the next element in the heap, which is not obvious
		// from looking at the (*expiringHeap).Pop() implementation below.
		// heap.Pop() swaps the first entry with the last entry of the heap, then
		// calls (*expiringHeap).Pop() which returns the last element.
		if len(c.heap) == 0 || now.Before(c.heap[0].expiry) {
			return
		}
		cleanup := heap.Pop(&c.heap).(*expiringHeapEntry)
		c.del(cleanup.key, cleanup.generation)
	}
}

type expiringHeapEntry struct {
	key        interface{}
	expiry     time.Time
	generation uint64
}

// expiringHeap is a min-heap ordered by expiration time of its entries. The
// expiring cache uses this as a priority queue to efficiently organize entries
// which will be garbage collected once they expire.
type expiringHeap []*expiringHeapEntry

var _ heap.Interface = &expiringHeap{}

func (cq expiringHeap) Len() int {
	return len(cq)
}

func (cq expiringHeap) Less(i, j int) bool {
	return cq[i].expiry.Before(cq[j].expiry)
}

func (cq expiringHeap) Swap(i, j int) {
	cq[i], cq[j] = cq[j], cq[i]
}

func (cq *expiringHeap) Push(c interface{}) {
	*cq = append(*cq, c.(*expiringHeapEntry))
}

func (cq *expiringHeap) Pop() interface{} {
	c := (*cq)[cq.Len()-1]
	*cq = (*cq)[:cq.Len()-1]
	return c
}
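A short usage sketch for the new Expiring cache, assuming the vendored k8s.io/apimachinery/pkg/util/cache and util/clock packages; the fake clock makes the TTL behaviour visible:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
	utilclock "k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fake := utilclock.NewFakeClock(time.Now())
	c := cache.NewExpiringWithClock(fake)

	c.Set("token", "abc123", 10*time.Second)
	if v, ok := c.Get("token"); ok {
		fmt.Println(v) // abc123
	}

	// After the per-entry TTL passes, Get no longer returns the entry, even
	// though the entry is only garbage collected during a later Set.
	fake.Step(11 * time.Second)
	_, ok := c.Get("token")
	fmt.Println(ok) // false
}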
@@ -19,6 +19,7 @@ package diff
import (
	"bytes"
	"fmt"
	"reflect"
	"strings"
	"text/tabwriter"

	"github.com/google/go-cmp/cmp"
@@ -116,3 +117,41 @@ func ObjectGoPrintSideBySide(a, b interface{}) string {
	w.Flush()
	return buf.String()
}

// IgnoreUnset is an option that ignores fields that are unset on the right
// hand side of a comparison. This is useful in testing to assert that an
// object is a derivative.
func IgnoreUnset() cmp.Option {
	return cmp.Options{
		// ignore unset fields in v2
		cmp.FilterPath(func(path cmp.Path) bool {
			_, v2 := path.Last().Values()
			switch v2.Kind() {
			case reflect.Slice, reflect.Map:
				if v2.IsNil() || v2.Len() == 0 {
					return true
				}
			case reflect.String:
				if v2.Len() == 0 {
					return true
				}
			case reflect.Interface, reflect.Ptr:
				if v2.IsNil() {
					return true
				}
			}
			return false
		}, cmp.Ignore()),
		// ignore map entries that aren't set in v2
		cmp.FilterPath(func(path cmp.Path) bool {
			switch i := path.Last().(type) {
			case cmp.MapIndex:
				if _, v2 := i.Values(); !v2.IsValid() {
					fmt.Println("E")
					return true
				}
			}
			return false
		}, cmp.Ignore()),
	}
}
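A hedged usage sketch for IgnoreUnset with go-cmp; the spec type below is illustrative only:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/util/diff"
)

type spec struct {
	Image string
	Args  []string
}

func main() {
	full := spec{Image: "nginx:1.17", Args: []string{"-g", "daemon off;"}}
	partial := spec{Image: "nginx:1.17"} // unset fields on the right-hand side

	// With IgnoreUnset, the empty Args on the right are not reported, so the
	// partial object is treated as a derivative of the full one.
	fmt.Println(cmp.Diff(full, partial, diff.IgnoreUnset()) == "") // true
}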
@@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
		return convertSliceNumbers(*v, 0)

	case *interface{}:
		// Build a decoder from the given data
		decoder := json.NewDecoder(bytes.NewBuffer(data))
		// Preserve numbers, rather than casting to float64 automatically
		decoder.UseNumber()
		// Run the decode
		if err := decoder.Decode(v); err != nil {
			return err
		}
		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
		return convertInterfaceNumbers(v, 0)

	default:
		return json.Unmarshal(data, v)
	}
}

func convertInterfaceNumbers(v *interface{}, depth int) error {
	var err error
	switch v2 := (*v).(type) {
	case json.Number:
		*v, err = convertNumber(v2)
	case map[string]interface{}:
		err = convertMapNumbers(v2, depth+1)
	case []interface{}:
		err = convertSliceNumbers(v2, depth+1)
	}
	return err
}

// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {
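With the new *interface{} case, whole numbers survive as int64 rather than being widened to float64. A small usage sketch, assuming the vendored k8s.io/apimachinery/pkg/util/json package:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var v interface{}
	// The *interface{} case above decodes with UseNumber and then converts
	// whole numbers to int64 and fractional numbers to float64.
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &v); err != nil {
		panic(err)
	}
	m := v.(map[string]interface{})
	fmt.Printf("%T %T\n", m["replicas"], m["ratio"]) // int64 float64
}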
@@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[
func extractStackCreator() (string, int, bool) {
	stack := debug.Stack()
	matches := stackCreator.FindStringSubmatch(string(stack))
-	if matches == nil || len(matches) != 4 {
+	if len(matches) != 4 {
		return "", 0, false
	}
	line, err := strconv.Atoi(matches[3])
|
||||
|
|
|
|||
|
|
@@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string {
	return result
}

// IsTimeout returns true if the given error is a network timeout error
func IsTimeout(err error) bool {
	neterr, ok := err.(net.Error)
	return ok && neterr != nil && neterr.Timeout()
}

// IsProbableEOF returns true if the given error resembles a connection termination
// scenario that would justify assuming that the watch is empty.
// These errors are what the Go http stack returns back to us which are general
|
||||
|
|
|
|||
|
|
@ -36,6 +36,18 @@ const (
|
|||
familyIPv6 AddressFamily = 6
|
||||
)
|
||||
|
||||
type AddressFamilyPreference []AddressFamily
|
||||
|
||||
var (
|
||||
preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6}
|
||||
preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4}
|
||||
)
|
||||
|
||||
const (
|
||||
// LoopbackInterfaceName is the default name of the loopback interface
|
||||
LoopbackInterfaceName = "lo"
|
||||
)
|
||||
|
||||
const (
|
||||
ipv4RouteFile = "/proc/net/route"
|
||||
ipv6RouteFile = "/proc/net/ipv6_route"
|
||||
|
|
@ -53,7 +65,7 @@ type RouteFile struct {
|
|||
parse func(input io.Reader) ([]Route, error)
|
||||
}
|
||||
|
||||
// noRoutesError can be returned by ChooseBindAddress() in case of no routes
|
||||
// noRoutesError can be returned in case of no routes
|
||||
type noRoutesError struct {
|
||||
message string
|
||||
}
|
||||
|
|
@ -254,7 +266,7 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
// memberOF tells if the IP is of the desired family. Used for checking interface addresses.
|
||||
// memberOf tells if the IP is of the desired family. Used for checking interface addresses.
|
||||
func memberOf(ip net.IP, family AddressFamily) bool {
|
||||
if ip.To4() != nil {
|
||||
return family == familyIPv4
|
||||
|
|
@ -265,8 +277,8 @@ func memberOf(ip net.IP, family AddressFamily) bool {
|
|||
|
||||
// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that
|
||||
// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP.
|
||||
// Searches for IPv4 addresses, and then IPv6 addresses.
|
||||
func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
|
||||
// addressFamilies determines whether it prefers IPv4 or IPv6
|
||||
func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) {
|
||||
intfs, err := nw.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -274,7 +286,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
|
|||
if len(intfs) == 0 {
|
||||
return nil, fmt.Errorf("no interfaces found on host.")
|
||||
}
|
||||
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
|
||||
for _, family := range addressFamilies {
|
||||
klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
|
||||
for _, intf := range intfs {
|
||||
if !isInterfaceUp(&intf) {
|
||||
|
|
@ -321,15 +333,19 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
|
|||
// IP of the interface with a gateway on it (with priority given to IPv4). For a node
|
||||
// with no internet connection, it returns error.
|
||||
func ChooseHostInterface() (net.IP, error) {
|
||||
return chooseHostInterface(preferIPv4)
|
||||
}
|
||||
|
||||
func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) {
|
||||
var nw networkInterfacer = networkInterface{}
|
||||
if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) {
|
||||
return chooseIPFromHostInterfaces(nw)
|
||||
return chooseIPFromHostInterfaces(nw, addressFamilies)
|
||||
}
|
||||
routes, err := getAllDefaultRoutes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return chooseHostInterfaceFromRoute(routes, nw)
|
||||
return chooseHostInterfaceFromRoute(routes, nw, addressFamilies)
|
||||
}
|
||||
|
||||
// networkInterfacer defines an interface for several net library functions. Production
|
||||
|
|
@ -377,10 +393,10 @@ func getAllDefaultRoutes() ([]Route, error) {
|
|||
}
|
||||
|
||||
// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a
|
||||
// global IP address from the interface for the route. Will first look all each IPv4 route for
|
||||
// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP.
|
||||
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) {
|
||||
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
|
||||
// global IP address from the interface for the route. addressFamilies determines whether it
|
||||
// prefers IPv4 or IPv6
|
||||
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) {
|
||||
for _, family := range addressFamilies {
|
||||
klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
|
||||
for _, route := range routes {
|
||||
if route.Family != family {
|
||||
|
|
@ -401,12 +417,19 @@ func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP,
|
|||
return nil, fmt.Errorf("unable to select an IP from default routes.")
|
||||
}
|
||||
|
||||
// If bind-address is usable, return it directly
|
||||
// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default
|
||||
// interface.
|
||||
func ChooseBindAddress(bindAddress net.IP) (net.IP, error) {
|
||||
// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress:
|
||||
// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface().
|
||||
// If bindAddress is unspecified or loopback, it returns the default IP of the same
|
||||
// address family as bindAddress.
|
||||
// Otherwise, it just returns bindAddress.
|
||||
func ResolveBindAddress(bindAddress net.IP) (net.IP, error) {
|
||||
addressFamilies := preferIPv4
|
||||
if bindAddress != nil && memberOf(bindAddress, familyIPv6) {
|
||||
addressFamilies = preferIPv6
|
||||
}
|
||||
|
||||
if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() {
|
||||
hostIP, err := ChooseHostInterface()
|
||||
hostIP, err := chooseHostInterface(addressFamilies)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -414,3 +437,21 @@ func ChooseBindAddress(bindAddress net.IP) (net.IP, error) {
|
|||
}
|
||||
return bindAddress, nil
|
||||
}
|
||||
|
||||
// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4.
|
||||
// This is required in case of network setups where default routes are present, but network
|
||||
// interfaces use only link-local addresses (e.g. as described in RFC5549).
|
||||
// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface.
|
||||
func ChooseBindAddressForInterface(intfName string) (net.IP, error) {
|
||||
var nw networkInterfacer = networkInterface{}
|
||||
for _, family := range preferIPv4 {
|
||||
ip, err := getIPFromInterface(intfName, family, nw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ip != nil {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1014,7 +1014,7 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey
		setOrderIndex++
	}
	// If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list.
-	// the second check is is a sanity check, and should always be true if the first is true.
+	// the second check is a sanity check, and should always be true if the first is true.
	if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
		return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
	}
||||
|
|
|
|||
|
|
@@ -204,7 +204,7 @@ func Forbidden(field *Path, detail string) *Error {
// Invalid, but the returned error will not include the too-long
// value.
func TooLong(field *Path, value interface{}, maxLength int) *Error {
-	return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)}
+	return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
}

// TooMany returns a *Error indicating "too many". This is used to
|
||||
|
|
|
|||
|
|
@@ -70,7 +70,11 @@ func IsQualifiedName(value string) []string {
	return errs
}

-// IsFullyQualifiedName checks if the name is fully qualified.
+// IsFullyQualifiedName checks if the name is fully qualified. This is similar
+// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of
+// 2 and does not accept a trailing . as valid.
+// TODO: This function is deprecated and preserved until all callers migrate to
+// IsFullyQualifiedDomainName; please don't add new callers.
func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
	var allErrors field.ErrorList
	if len(name) == 0 {
@@ -85,6 +89,26 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
	return allErrors
}

// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This
// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments
// instead of 3 and accepts a trailing . as valid.
func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList {
	var allErrors field.ErrorList
	if len(name) == 0 {
		return append(allErrors, field.Required(fldPath, ""))
	}
	if strings.HasSuffix(name, ".") {
		name = name[:len(name)-1]
	}
	if errs := IsDNS1123Subdomain(name); len(errs) > 0 {
		return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ",")))
	}
	if len(strings.Split(name, ".")) < 2 {
		return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots"))
	}
	return allErrors
}

const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"

@@ -285,6 +309,26 @@ func IsValidIP(value string) []string {
	return nil
}

// IsValidIPv4Address tests that the argument is a valid IPv4 address.
func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
	var allErrors field.ErrorList
	ip := net.ParseIP(value)
	if ip == nil || ip.To4() == nil {
		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
	}
	return allErrors
}

// IsValidIPv6Address tests that the argument is a valid IPv6 address.
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
	var allErrors field.ErrorList
	ip := net.ParseIP(value)
	if ip == nil || ip.To4() != nil {
		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
	}
	return allErrors
}

const percentFmt string = "[0-9]+%"
const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
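A short usage sketch for the validators added above, assuming the vendored validation and validation/field packages; the expected results are described in the comments:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "address")

	fmt.Println(validation.IsValidIPv4Address(path, "10.0.0.1")) // empty list: valid
	fmt.Println(validation.IsValidIPv6Address(path, "10.0.0.1")) // one "must be a valid IPv6 address" error

	fmt.Println(validation.IsFullyQualifiedDomainName(path, "example.com.")) // empty list: trailing dot accepted
	fmt.Println(validation.IsFullyQualifiedDomainName(path, "example"))      // error: needs at least two segments
}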
|
||||
|
||||
|
|
|
|||
|
|
@@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() {
		case io.ErrUnexpectedEOF:
			klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
		default:
-			if net.IsProbableEOF(err) {
+			if net.IsProbableEOF(err) || net.IsTimeout(err) {
				klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
			} else {
				sw.result <- Event{
|
||||
|
|
|
|||
|
|
@@ -20,6 +20,7 @@ import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// ResourcePrinterFunc is a function that can print objects
@@ -35,3 +36,19 @@ type ResourcePrinter interface {
	// Print receives a runtime object, formats it and prints it to a writer.
	PrintObj(runtime.Object, io.Writer) error
}

// PrintOptions struct defines a struct for various print options
type PrintOptions struct {
	NoHeaders     bool
	WithNamespace bool
	WithKind      bool
	Wide          bool
	ShowLabels    bool
	Kind          schema.GroupKind
	ColumnLabels  []string

	SortBy string

	// indicates if it is OK to ignore missing keys for rendering an output template.
	AllowMissingKeys bool
}
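PrintOptions feeds the table printer added in the next file (see NewTablePrinter below). A hedged usage sketch, assuming the vendored k8s.io/cli-runtime/pkg/printers package path; the table contents are illustrative:

package main

import (
	"os"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	// A tiny server-side style table, printed with the options defined above.
	table := &metav1beta1.Table{
		ColumnDefinitions: []metav1beta1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Age", Type: "string"},
		},
		Rows: []metav1beta1.TableRow{
			{Cells: []interface{}{"hello-svc", "5m"}},
		},
	}

	p := printers.NewTablePrinter(printers.PrintOptions{Wide: true})
	_ = p.PrintObj(table, os.Stdout) // NAME  AGE / hello-svc  5m
}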
|
||||
|
|
|
|||
|
|
@ -0,0 +1,574 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package printers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/duration"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
var _ ResourcePrinter = &HumanReadablePrinter{}
|
||||
|
||||
type printHandler struct {
|
||||
columnDefinitions []metav1beta1.TableColumnDefinition
|
||||
printFunc reflect.Value
|
||||
}
|
||||
|
||||
var (
|
||||
statusHandlerEntry = &printHandler{
|
||||
columnDefinitions: statusColumnDefinitions,
|
||||
printFunc: reflect.ValueOf(printStatus),
|
||||
}
|
||||
|
||||
statusColumnDefinitions = []metav1beta1.TableColumnDefinition{
|
||||
{Name: "Status", Type: "string"},
|
||||
{Name: "Reason", Type: "string"},
|
||||
{Name: "Message", Type: "string"},
|
||||
}
|
||||
|
||||
defaultHandlerEntry = &printHandler{
|
||||
columnDefinitions: objectMetaColumnDefinitions,
|
||||
printFunc: reflect.ValueOf(printObjectMeta),
|
||||
}
|
||||
|
||||
objectMetaColumnDefinitions = []metav1beta1.TableColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
|
||||
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
|
||||
}
|
||||
|
||||
withEventTypePrefixColumns = []string{"EVENT"}
|
||||
withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too.
|
||||
)
|
||||
|
||||
// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide
|
||||
// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers
|
||||
// will only be printed if the object type changes. This makes it useful for printing items
|
||||
// received from watches.
|
||||
type HumanReadablePrinter struct {
|
||||
options PrintOptions
|
||||
lastType interface{}
|
||||
lastColumns []metav1beta1.TableColumnDefinition
|
||||
printedHeaders bool
|
||||
}
|
||||
|
||||
// NewTablePrinter creates a printer suitable for calling PrintObj().
|
||||
func NewTablePrinter(options PrintOptions) ResourcePrinter {
|
||||
printer := &HumanReadablePrinter{
|
||||
options: options,
|
||||
}
|
||||
return printer
|
||||
}
|
||||
|
||||
func printHeader(columnNames []string, w io.Writer) error {
|
||||
if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrintObj prints the obj in a human-friendly format according to the type of the obj.
|
||||
func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error {
|
||||
|
||||
w, found := output.(*tabwriter.Writer)
|
||||
if !found {
|
||||
w = GetNewTabWriter(output)
|
||||
output = w
|
||||
defer w.Flush()
|
||||
}
|
||||
|
||||
var eventType string
|
||||
if event, isEvent := obj.(*metav1.WatchEvent); isEvent {
|
||||
eventType = event.Type
|
||||
obj = event.Object.Object
|
||||
}
|
||||
|
||||
// Parameter "obj" is a table from server; print it.
|
||||
// display tables following the rules of options
|
||||
if table, ok := obj.(*metav1beta1.Table); ok {
|
||||
// Do not print headers if this table has no column definitions, or they are the same as the last ones we printed
|
||||
localOptions := h.options
|
||||
if h.printedHeaders && (len(table.ColumnDefinitions) == 0 || reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns)) {
|
||||
localOptions.NoHeaders = true
|
||||
}
|
||||
|
||||
if len(table.ColumnDefinitions) == 0 {
|
||||
// If this table has no column definitions, use the columns from the last table we printed for decoration and layout.
|
||||
// This is done when receiving tables in watch events to save bandwidth.
|
||||
table.ColumnDefinitions = h.lastColumns
|
||||
} else if !reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns) {
|
||||
// If this table has column definitions, remember them for future use.
|
||||
h.lastColumns = table.ColumnDefinitions
|
||||
h.printedHeaders = false
|
||||
}
|
||||
|
||||
if len(table.Rows) > 0 {
|
||||
h.printedHeaders = true
|
||||
}
|
||||
|
||||
if err := decorateTable(table, localOptions); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(eventType) > 0 {
|
||||
if err := addColumns(beginning, table,
|
||||
[]metav1beta1.TableColumnDefinition{{Name: "Event", Type: "string"}},
|
||||
[]cellValueFunc{func(metav1beta1.TableRow) (interface{}, error) { return formatEventType(eventType), nil }},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return printTable(table, output, localOptions)
|
||||
}
|
||||
|
||||
// Could not find print handler for "obj"; use the default or status print handler.
|
||||
// Print with the default or status handler, and use the columns from the last time
|
||||
var handler *printHandler
|
||||
if _, isStatus := obj.(*metav1.Status); isStatus {
|
||||
handler = statusHandlerEntry
|
||||
} else {
|
||||
handler = defaultHandlerEntry
|
||||
}
|
||||
|
||||
includeHeaders := h.lastType != handler && !h.options.NoHeaders
|
||||
|
||||
if h.lastType != nil && h.lastType != handler && !h.options.NoHeaders {
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
|
||||
if err := printRowsForHandlerEntry(output, handler, eventType, obj, h.options, includeHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
h.lastType = handler
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// printTable prints a table to the provided output respecting the filtering rules for options
|
||||
// for wide columns and filtered rows. It filters out rows that are Completed. You should call
|
||||
// decorateTable if you receive a table from a remote server before calling printTable.
|
||||
func printTable(table *metav1beta1.Table, output io.Writer, options PrintOptions) error {
|
||||
if !options.NoHeaders {
|
||||
// avoid printing headers if we have no rows to display
|
||||
if len(table.Rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
first := true
|
||||
for _, column := range table.ColumnDefinitions {
|
||||
if !options.Wide && column.Priority != 0 {
|
||||
continue
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
fmt.Fprint(output, "\t")
|
||||
}
|
||||
fmt.Fprint(output, strings.ToUpper(column.Name))
|
||||
}
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
for _, row := range table.Rows {
|
||||
first := true
|
||||
for i, cell := range row.Cells {
|
||||
if i >= len(table.ColumnDefinitions) {
|
||||
// https://issue.k8s.io/66379
|
||||
// don't panic in case of bad output from the server, with more cells than column definitions
|
||||
break
|
||||
}
|
||||
column := table.ColumnDefinitions[i]
|
||||
if !options.Wide && column.Priority != 0 {
|
||||
continue
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
fmt.Fprint(output, "\t")
|
||||
}
|
||||
if cell != nil {
|
||||
fmt.Fprint(output, cell)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type cellValueFunc func(metav1beta1.TableRow) (interface{}, error)
|
||||
|
||||
type columnAddPosition int
|
||||
|
||||
const (
|
||||
beginning columnAddPosition = 1
|
||||
end columnAddPosition = 2
|
||||
)
|
||||
|
||||
func addColumns(pos columnAddPosition, table *metav1beta1.Table, columns []metav1beta1.TableColumnDefinition, valueFuncs []cellValueFunc) error {
|
||||
if len(columns) != len(valueFuncs) {
|
||||
return fmt.Errorf("cannot prepend columns, unmatched value functions")
|
||||
}
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the new rows
|
||||
newRows := make([][]interface{}, len(table.Rows))
|
||||
for i := range table.Rows {
|
||||
newCells := make([]interface{}, 0, len(columns)+len(table.Rows[i].Cells))
|
||||
|
||||
if pos == end {
|
||||
// If we're appending, start with the existing cells,
|
||||
// then add nil cells to match the number of columns
|
||||
newCells = append(newCells, table.Rows[i].Cells...)
|
||||
for len(newCells) < len(table.ColumnDefinitions) {
|
||||
newCells = append(newCells, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Compute cells for new columns
|
||||
for _, f := range valueFuncs {
|
||||
newCell, err := f(table.Rows[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newCells = append(newCells, newCell)
|
||||
}
|
||||
|
||||
if pos == beginning {
|
||||
// If we're prepending, add existing cells
|
||||
newCells = append(newCells, table.Rows[i].Cells...)
|
||||
}
|
||||
|
||||
// Remember the new cells for this row
|
||||
newRows[i] = newCells
|
||||
}
|
||||
|
||||
// All cells successfully computed, now replace columns and rows
|
||||
newColumns := make([]metav1beta1.TableColumnDefinition, 0, len(columns)+len(table.ColumnDefinitions))
|
||||
switch pos {
|
||||
case beginning:
|
||||
newColumns = append(newColumns, columns...)
|
||||
newColumns = append(newColumns, table.ColumnDefinitions...)
|
||||
case end:
|
||||
newColumns = append(newColumns, table.ColumnDefinitions...)
|
||||
newColumns = append(newColumns, columns...)
|
||||
default:
|
||||
return fmt.Errorf("invalid column add position: %v", pos)
|
||||
}
|
||||
table.ColumnDefinitions = newColumns
|
||||
for i := range table.Rows {
|
||||
table.Rows[i].Cells = newRows[i]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// decorateTable takes a table and attempts to add label columns and the
|
||||
// namespace column. It will fill empty columns with nil (if the object
|
||||
// does not expose metadata). It returns an error if the table cannot
|
||||
// be decorated.
|
||||
func decorateTable(table *metav1beta1.Table, options PrintOptions) error {
|
||||
width := len(table.ColumnDefinitions) + len(options.ColumnLabels)
|
||||
if options.WithNamespace {
|
||||
width++
|
||||
}
|
||||
if options.ShowLabels {
|
||||
width++
|
||||
}
|
||||
|
||||
columns := table.ColumnDefinitions
|
||||
|
||||
nameColumn := -1
|
||||
if options.WithKind && !options.Kind.Empty() {
|
||||
for i := range columns {
|
||||
if columns[i].Format == "name" && columns[i].Type == "string" {
|
||||
nameColumn = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if width != len(table.ColumnDefinitions) {
|
||||
columns = make([]metav1beta1.TableColumnDefinition, 0, width)
|
||||
if options.WithNamespace {
|
||||
columns = append(columns, metav1beta1.TableColumnDefinition{
|
||||
Name: "Namespace",
|
||||
Type: "string",
|
||||
})
|
||||
}
|
||||
columns = append(columns, table.ColumnDefinitions...)
|
||||
for _, label := range formatLabelHeaders(options.ColumnLabels) {
|
||||
columns = append(columns, metav1beta1.TableColumnDefinition{
|
||||
Name: label,
|
||||
Type: "string",
|
||||
})
|
||||
}
|
||||
if options.ShowLabels {
|
||||
columns = append(columns, metav1beta1.TableColumnDefinition{
|
||||
Name: "Labels",
|
||||
Type: "string",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
rows := table.Rows
|
||||
|
||||
includeLabels := len(options.ColumnLabels) > 0 || options.ShowLabels
|
||||
if includeLabels || options.WithNamespace || nameColumn != -1 {
|
||||
for i := range rows {
|
||||
row := rows[i]
|
||||
|
||||
if nameColumn != -1 {
|
||||
row.Cells[nameColumn] = fmt.Sprintf("%s/%s", strings.ToLower(options.Kind.String()), row.Cells[nameColumn])
|
||||
}
|
||||
|
||||
var m metav1.Object
|
||||
if obj := row.Object.Object; obj != nil {
|
||||
if acc, err := meta.Accessor(obj); err == nil {
|
||||
m = acc
|
||||
}
|
||||
}
|
||||
// if we can't get an accessor, fill out the appropriate columns with empty spaces
|
||||
if m == nil {
|
||||
if options.WithNamespace {
|
||||
r := make([]interface{}, 1, width)
|
||||
row.Cells = append(r, row.Cells...)
|
||||
}
|
||||
for j := 0; j < width-len(row.Cells); j++ {
|
||||
row.Cells = append(row.Cells, nil)
|
||||
}
|
||||
rows[i] = row
|
||||
continue
|
||||
}
|
||||
|
||||
if options.WithNamespace {
|
||||
r := make([]interface{}, 1, width)
|
||||
r[0] = m.GetNamespace()
|
||||
row.Cells = append(r, row.Cells...)
|
||||
}
|
||||
if includeLabels {
|
||||
row.Cells = appendLabelCells(row.Cells, m.GetLabels(), options)
|
||||
}
|
||||
rows[i] = row
|
||||
}
|
||||
}
|
||||
|
||||
table.ColumnDefinitions = columns
|
||||
table.Rows = rows
|
||||
return nil
|
||||
}
|
||||
|
||||
// printRowsForHandlerEntry prints the incremental table output (headers if the current type is
// different from lastType) including all the rows in the object. It returns the current type
// or an error, if any.
func printRowsForHandlerEntry(output io.Writer, handler *printHandler, eventType string, obj runtime.Object, options PrintOptions, includeHeaders bool) error {
	var results []reflect.Value

	args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)}
	results = handler.printFunc.Call(args)
	if !results[1].IsNil() {
		return results[1].Interface().(error)
	}

	if includeHeaders {
		var headers []string
		for _, column := range handler.columnDefinitions {
			if column.Priority != 0 && !options.Wide {
				continue
			}
			headers = append(headers, strings.ToUpper(column.Name))
		}
		headers = append(headers, formatLabelHeaders(options.ColumnLabels)...)
		// LABELS is always the last column.
		headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...)
		// prepend namespace header
		if options.WithNamespace {
			headers = append(withNamespacePrefixColumns, headers...)
		}
		// prepend event type header
		if len(eventType) > 0 {
			headers = append(withEventTypePrefixColumns, headers...)
		}
		printHeader(headers, output)
	}

	if results[1].IsNil() {
		rows := results[0].Interface().([]metav1beta1.TableRow)
		printRows(output, eventType, rows, options)
		return nil
	}
	return results[1].Interface().(error)
}

var formattedEventType = map[string]string{
	string(watch.Added):    "ADDED   ",
	string(watch.Modified): "MODIFIED",
	string(watch.Deleted):  "DELETED ",
	string(watch.Error):    "ERROR   ",
}

func formatEventType(eventType string) string {
	if formatted, ok := formattedEventType[eventType]; ok {
		return formatted
	}
	return string(eventType)
}

// printRows writes the provided rows to output.
func printRows(output io.Writer, eventType string, rows []metav1beta1.TableRow, options PrintOptions) {
	for _, row := range rows {
		if len(eventType) > 0 {
			fmt.Fprint(output, formatEventType(eventType))
			fmt.Fprint(output, "\t")
		}
		if options.WithNamespace {
			if obj := row.Object.Object; obj != nil {
				if m, err := meta.Accessor(obj); err == nil {
					fmt.Fprint(output, m.GetNamespace())
				}
			}
			fmt.Fprint(output, "\t")
		}

		for i, cell := range row.Cells {
			if i != 0 {
				fmt.Fprint(output, "\t")
			} else {
				// TODO: remove this once we drop the legacy printers
				if options.WithKind && !options.Kind.Empty() {
					fmt.Fprintf(output, "%s/%s", strings.ToLower(options.Kind.String()), cell)
					continue
				}
			}
			fmt.Fprint(output, cell)
		}

		hasLabels := len(options.ColumnLabels) > 0
		if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) {
			if m, err := meta.Accessor(obj); err == nil {
				for _, value := range labelValues(m.GetLabels(), options) {
					output.Write([]byte("\t"))
					output.Write([]byte(value))
				}
			}
		}

		output.Write([]byte("\n"))
	}
}

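A standalone sketch (invented row data, not from the diff) of the raw bytes printRows emits: tab-separated cells plus a trailing newline, with column alignment left entirely to the destination writer, normally the tabwriter returned by GetNewTabWriter further below.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Mirrors the write order in printRows: event type, namespace, then the cells.
	fmt.Fprint(&buf, "ADDED   ")
	fmt.Fprint(&buf, "\t")
	fmt.Fprint(&buf, "default")
	fmt.Fprint(&buf, "\t")
	for i, cell := range []interface{}{"hello", "2m"} {
		if i != 0 {
			fmt.Fprint(&buf, "\t")
		}
		fmt.Fprint(&buf, cell)
	}
	buf.WriteString("\n")

	fmt.Printf("%q\n", buf.String()) // "ADDED   \tdefault\thello\t2m\n"
}
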
func formatLabelHeaders(columnLabels []string) []string {
	formHead := make([]string, len(columnLabels))
	for i, l := range columnLabels {
		p := strings.Split(l, "/")
		formHead[i] = strings.ToUpper((p[len(p)-1]))
	}
	return formHead
}

// headers for --show-labels=true
func formatShowLabelsHeader(showLabels bool) []string {
	if showLabels {
		return []string{"LABELS"}
	}
	return nil
}

// labelValues returns a slice of value columns matching the requested print options.
func labelValues(itemLabels map[string]string, opts PrintOptions) []string {
	var values []string
	for _, key := range opts.ColumnLabels {
		values = append(values, itemLabels[key])
	}
	if opts.ShowLabels {
		values = append(values, labels.FormatLabels(itemLabels))
	}
	return values
}

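A standalone sketch (invented label map, not from the diff) of the cell values labelValues produces: one cell per requested label column and, when labels are shown, a single cell rendered by labels.FormatLabels from apimachinery.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	itemLabels := map[string]string{
		"app":                          "hello",
		"serving.knative.dev/service":  "hello",
	}

	// What a requested "app" label column would contain:
	fmt.Println(itemLabels["app"]) // hello

	// What the LABELS column (ShowLabels) would contain:
	fmt.Println(labels.FormatLabels(itemLabels)) // e.g. app=hello,serving.knative.dev/service=hello
}
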
// appendLabelCells returns a slice of value columns matching the requested print options.
// Intended for use with tables.
func appendLabelCells(values []interface{}, itemLabels map[string]string, opts PrintOptions) []interface{} {
	for _, key := range opts.ColumnLabels {
		values = append(values, itemLabels[key])
	}
	if opts.ShowLabels {
		values = append(values, labels.FormatLabels(itemLabels))
	}
	return values
}

func printStatus(obj runtime.Object, options PrintOptions) ([]metav1beta1.TableRow, error) {
	status, ok := obj.(*metav1.Status)
	if !ok {
		return nil, fmt.Errorf("expected *v1.Status, got %T", obj)
	}
	return []metav1beta1.TableRow{{
		Object: runtime.RawExtension{Object: obj},
		Cells:  []interface{}{status.Status, status.Reason, status.Message},
	}}, nil
}

func printObjectMeta(obj runtime.Object, options PrintOptions) ([]metav1beta1.TableRow, error) {
	if meta.IsListType(obj) {
		rows := make([]metav1beta1.TableRow, 0, 16)
		err := meta.EachListItem(obj, func(obj runtime.Object) error {
			nestedRows, err := printObjectMeta(obj, options)
			if err != nil {
				return err
			}
			rows = append(rows, nestedRows...)
			return nil
		})
		if err != nil {
			return nil, err
		}
		return rows, nil
	}

	rows := make([]metav1beta1.TableRow, 0, 1)
	m, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	row := metav1beta1.TableRow{
		Object: runtime.RawExtension{Object: obj},
	}
	row.Cells = append(row.Cells, m.GetName(), translateTimestampSince(m.GetCreationTimestamp()))
	rows = append(rows, row)
	return rows, nil
}

// translateTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestampSince(timestamp metav1.Time) string {
	if timestamp.IsZero() {
		return "<unknown>"
	}

	return duration.HumanDuration(time.Since(timestamp.Time))
}

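A standalone sketch (arbitrary durations, not from the diff) of the compact AGE strings that translateTimestampSince delegates to duration.HumanDuration for.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/duration"
)

func main() {
	for _, d := range []time.Duration{45 * time.Second, 90 * time.Minute, 26 * time.Hour} {
		// Prints compact forms such as 45s, 90m, 26h.
		fmt.Println(duration.HumanDuration(d))
	}
}
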
@@ -0,0 +1,36 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package printers

import (
	"io"

	"github.com/liggitt/tabwriter"
)

const (
	tabwriterMinWidth = 6
	tabwriterWidth    = 4
	tabwriterPadding  = 3
	tabwriterPadChar  = ' '
	tabwriterFlags    = tabwriter.RememberWidths
)

// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text.
func GetNewTabWriter(output io.Writer) *tabwriter.Writer {
	return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags)
}

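A standalone sketch (invented rows, not from the diff) of using the same liggitt/tabwriter fork directly, with the RememberWidths flag that GetNewTabWriter sets; the flag is intended to keep column widths stable across successive flushes during incremental (watch-style) output.

package main

import (
	"os"

	"github.com/liggitt/tabwriter"
)

func main() {
	// Same settings as the vendored constants above.
	w := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)

	w.Write([]byte("NAME\tREADY\n"))
	w.Write([]byte("hello\tTrue\n"))
	w.Flush()

	// A later, shorter batch should still line up with the widths seen above.
	w.Write([]byte("hi\tTrue\n"))
	w.Flush()
}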